diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -773,13 +773,7 @@
   // destination is tied to a source. Unless the source is undef. In that case
   // the user would have some control over the policy values.
   bool TailAgnostic = true;
-  bool UsesMaskPolicy = RISCVII::usesMaskPolicy(TSFlags);
-  // FIXME: Could we look at the above or below instructions to choose the
-  // matched mask policy to reduce vsetvli instructions? Default mask policy is
-  // agnostic if instructions use mask policy, otherwise is undisturbed. Because
-  // most mask operations are mask undisturbed, so we could possibly reduce the
-  // vsetvli between mask and nomasked instruction sequence.
-  bool MaskAgnostic = UsesMaskPolicy;
+  bool MaskAgnostic = true;
   unsigned UseOpIdx;
   if (RISCVII::hasVecPolicyOp(TSFlags)) {
     const MachineOperand &Op = MI.getOperand(MI.getNumExplicitOperands() - 1);
@@ -794,15 +788,13 @@
     MaskAgnostic = Policy & RISCVII::MASK_AGNOSTIC;
   } else if (MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
     TailAgnostic = false;
-    if (UsesMaskPolicy)
-      MaskAgnostic = false;
+    MaskAgnostic = false;
     // If the tied operand is an IMPLICIT_DEF we can keep TailAgnostic.
     const MachineOperand &UseMO = MI.getOperand(UseOpIdx);
     MachineInstr *UseMI = MRI->getVRegDef(UseMO.getReg());
     if (UseMI && UseMI->isImplicitDef()) {
       TailAgnostic = true;
-      if (UsesMaskPolicy)
-        MaskAgnostic = true;
+      MaskAgnostic = true;
     }
     // Some pseudo instructions force a tail agnostic policy despite having a
     // tied def.
@@ -810,6 +802,9 @@
       TailAgnostic = true;
   }
 
+  if (!RISCVII::usesMaskPolicy(TSFlags))
+    MaskAgnostic = true;
+
   RISCVII::VLMUL VLMul = RISCVII::getLMul(TSFlags);
 
   unsigned Log2SEW = MI.getOperand(getSEWOpNum(MI)).getImm();
diff --git a/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll b/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
--- a/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
+++ b/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
@@ -22,7 +22,7 @@
 ; CHECK-NEXT: sub sp, sp, a0
 ; CHECK-NEXT: lui a0, %hi(.L__const._Z3foov.var_49)
 ; CHECK-NEXT: addi a0, a0, %lo(.L__const._Z3foov.var_49)
-; CHECK-NEXT: vsetivli zero, 2, e16, m2, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e16, m2, ta, ma
 ; CHECK-NEXT: vle16.v v8, (a0)
 ; CHECK-NEXT: lui a0, %hi(.L__const._Z3foov.var_48)
 ; CHECK-NEXT: addi a0, a0, %lo(.L__const._Z3foov.var_48)
@@ -52,7 +52,7 @@
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: lui a0, %hi(.L__const._Z3foov.var_44)
 ; CHECK-NEXT: addi a0, a0, %lo(.L__const._Z3foov.var_44)
-; CHECK-NEXT: vsetivli zero, 2, e16, m2, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e16, m2, ta, ma
 ; CHECK-NEXT: addi a1, sp, 16
 ; CHECK-NEXT: csrr a2, vlenb
 ; CHECK-NEXT: slli a2, a2, 1
diff --git a/llvm/test/CodeGen/RISCV/fold-vector-cmp.ll b/llvm/test/CodeGen/RISCV/fold-vector-cmp.ll
--- a/llvm/test/CodeGen/RISCV/fold-vector-cmp.ll
+++ b/llvm/test/CodeGen/RISCV/fold-vector-cmp.ll
@@ -13,16 +13,16 @@
 ; CHECK-V-LABEL: test:
 ; CHECK-V: # %bb.0:
 ; CHECK-V-NEXT: lui a1, 524288
-; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-V-NEXT: vmv.v.x v8, a1
-; CHECK-V-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
+; CHECK-V-NEXT: vsetvli zero, zero, e32, mf2, tu, ma
 ; CHECK-V-NEXT: vmv.s.x v8, a0
 ; CHECK-V-NEXT: addiw a0, a1, 2
-; CHECK-V-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
+; CHECK-V-NEXT: vsetvli
zero, zero, e32, mf2, ta, ma ; CHECK-V-NEXT: vmslt.vx v0, v8, a0 ; CHECK-V-NEXT: vmv.v.i v8, 0 ; CHECK-V-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vslidedown.vi v8, v8, 1 ; CHECK-V-NEXT: vmv.x.s a0, v8 ; CHECK-V-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll b/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll --- a/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll +++ b/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll @@ -37,13 +37,13 @@ ; ; CHECK-V-LABEL: stest_f64i32: ; CHECK-V: # %bb.0: # %entry -; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-V-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-V-NEXT: lui a0, 524288 ; CHECK-V-NEXT: addiw a1, a0, -1 ; CHECK-V-NEXT: vmin.vx v8, v8, a1 ; CHECK-V-NEXT: vmax.vx v8, v8, a0 -; CHECK-V-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-V-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-V-NEXT: ret entry: @@ -77,12 +77,12 @@ ; ; CHECK-V-LABEL: utest_f64i32: ; CHECK-V: # %bb.0: # %entry -; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-V-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-V-NEXT: li a0, -1 ; CHECK-V-NEXT: srli a0, a0, 32 ; CHECK-V-NEXT: vminu.vx v8, v8, a0 -; CHECK-V-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-V-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-V-NEXT: ret entry: @@ -124,13 +124,13 @@ ; ; CHECK-V-LABEL: ustest_f64i32: ; CHECK-V: # %bb.0: # %entry -; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-V-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-V-NEXT: li a0, -1 ; CHECK-V-NEXT: srli a0, a0, 32 ; CHECK-V-NEXT: vmin.vx v8, v8, a0 ; CHECK-V-NEXT: vmax.vx v8, v8, zero -; CHECK-V-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-V-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-V-NEXT: ret entry: @@ -202,14 +202,14 @@ ; ; CHECK-V-LABEL: stest_f32i32: ; CHECK-V: # %bb.0: # %entry -; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-V-NEXT: vfwcvt.rtz.x.f.v v10, v8 ; CHECK-V-NEXT: lui a0, 524288 ; CHECK-V-NEXT: addiw a1, a0, -1 -; CHECK-V-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; CHECK-V-NEXT: vmin.vx v8, v10, a1 ; CHECK-V-NEXT: vmax.vx v10, v8, a0 -; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-V-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-V-NEXT: ret entry: @@ -261,13 +261,13 @@ ; ; CHECK-V-LABEL: utest_f32i32: ; CHECK-V: # %bb.0: # %entry -; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-V-NEXT: vfwcvt.rtz.xu.f.v v10, v8 ; CHECK-V-NEXT: li a0, -1 ; CHECK-V-NEXT: srli a0, a0, 32 -; CHECK-V-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; CHECK-V-NEXT: vminu.vx v10, v10, a0 -; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-V-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-V-NEXT: ret entry: @@ -337,14 +337,14 @@ ; ; CHECK-V-LABEL: ustest_f32i32: ; CHECK-V: # %bb.0: # %entry -; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-V-NEXT: vfwcvt.rtz.x.f.v v10, v8 ; 
CHECK-V-NEXT: li a0, -1 ; CHECK-V-NEXT: srli a0, a0, 32 -; CHECK-V-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; CHECK-V-NEXT: vmin.vx v8, v10, a0 ; CHECK-V-NEXT: vmax.vx v10, v8, zero -; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-V-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-V-NEXT: ret entry: @@ -489,28 +489,28 @@ ; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz ; CHECK-V-NEXT: sd a0, 8(sp) ; CHECK-V-NEXT: addi a0, sp, 24 -; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vle64.v v8, (a0) ; CHECK-V-NEXT: mv a0, sp ; CHECK-V-NEXT: vle64.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 2, e64, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 2, e64, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v10, v8, 1 ; CHECK-V-NEXT: addi a0, sp, 16 -; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vle64.v v8, (a0) -; CHECK-V-NEXT: vsetivli zero, 3, e64, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 3, e64, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v10, v8, 2 ; CHECK-V-NEXT: addi a0, sp, 8 -; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vle64.v v8, (a0) -; CHECK-V-NEXT: vsetivli zero, 4, e64, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 4, e64, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v10, v8, 3 ; CHECK-V-NEXT: lui a0, 524288 ; CHECK-V-NEXT: addiw a1, a0, -1 -; CHECK-V-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; CHECK-V-NEXT: vmin.vx v8, v10, a1 ; CHECK-V-NEXT: vmax.vx v10, v8, a0 -; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-V-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-V-NEXT: ld ra, 56(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: ld s0, 48(sp) # 8-byte Folded Reload @@ -640,27 +640,27 @@ ; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz ; CHECK-V-NEXT: sd a0, 8(sp) ; CHECK-V-NEXT: addi a0, sp, 24 -; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vle64.v v8, (a0) ; CHECK-V-NEXT: mv a0, sp ; CHECK-V-NEXT: vle64.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 2, e64, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 2, e64, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v10, v8, 1 ; CHECK-V-NEXT: addi a0, sp, 16 -; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vle64.v v8, (a0) -; CHECK-V-NEXT: vsetivli zero, 3, e64, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 3, e64, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v10, v8, 2 ; CHECK-V-NEXT: addi a0, sp, 8 -; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vle64.v v8, (a0) -; CHECK-V-NEXT: vsetivli zero, 4, e64, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 4, e64, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v10, v8, 3 ; CHECK-V-NEXT: li a0, -1 ; CHECK-V-NEXT: srli a0, a0, 32 -; CHECK-V-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; CHECK-V-NEXT: vminu.vx v10, v10, a0 -; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-V-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-V-NEXT: ld ra, 56(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: ld s0, 48(sp) # 8-byte Folded Reload @@ -808,28 +808,28 @@ ; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz ; 
CHECK-V-NEXT: sd a0, 8(sp) ; CHECK-V-NEXT: addi a0, sp, 24 -; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vle64.v v8, (a0) ; CHECK-V-NEXT: mv a0, sp ; CHECK-V-NEXT: vle64.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 2, e64, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 2, e64, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v10, v8, 1 ; CHECK-V-NEXT: addi a0, sp, 16 -; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vle64.v v8, (a0) -; CHECK-V-NEXT: vsetivli zero, 3, e64, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 3, e64, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v10, v8, 2 ; CHECK-V-NEXT: addi a0, sp, 8 -; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vle64.v v8, (a0) -; CHECK-V-NEXT: vsetivli zero, 4, e64, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 4, e64, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v10, v8, 3 ; CHECK-V-NEXT: li a0, -1 ; CHECK-V-NEXT: srli a0, a0, 32 -; CHECK-V-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; CHECK-V-NEXT: vmin.vx v8, v10, a0 ; CHECK-V-NEXT: vmax.vx v10, v8, zero -; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-V-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-V-NEXT: ld ra, 56(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: ld s0, 48(sp) # 8-byte Folded Reload @@ -882,14 +882,14 @@ ; ; CHECK-V-LABEL: stest_f64i16: ; CHECK-V: # %bb.0: # %entry -; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-V-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-V-NEXT: lui a0, 8 ; CHECK-V-NEXT: addiw a0, a0, -1 ; CHECK-V-NEXT: vmin.vx v8, v9, a0 ; CHECK-V-NEXT: lui a0, 1048568 ; CHECK-V-NEXT: vmax.vx v8, v8, a0 -; CHECK-V-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-V-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-V-NEXT: ret entry: @@ -923,12 +923,12 @@ ; ; CHECK-V-LABEL: utest_f64i16: ; CHECK-V: # %bb.0: # %entry -; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-V-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-V-NEXT: lui a0, 16 ; CHECK-V-NEXT: addiw a0, a0, -1 ; CHECK-V-NEXT: vminu.vx v8, v9, a0 -; CHECK-V-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-V-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-V-NEXT: ret entry: @@ -970,13 +970,13 @@ ; ; CHECK-V-LABEL: ustest_f64i16: ; CHECK-V: # %bb.0: # %entry -; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-V-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-V-NEXT: lui a0, 16 ; CHECK-V-NEXT: addiw a0, a0, -1 ; CHECK-V-NEXT: vmin.vx v8, v9, a0 ; CHECK-V-NEXT: vmax.vx v8, v8, zero -; CHECK-V-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-V-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-V-NEXT: ret entry: @@ -1050,14 +1050,14 @@ ; ; CHECK-V-LABEL: stest_f32i16: ; CHECK-V: # %bb.0: # %entry -; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-V-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-V-NEXT: lui a0, 8 ; CHECK-V-NEXT: addiw a0, a0, -1 ; CHECK-V-NEXT: vmin.vx v8, v8, a0 ; CHECK-V-NEXT: lui a0, 1048568 ; CHECK-V-NEXT: vmax.vx v8, v8, a0 -; CHECK-V-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-V-NEXT: vsetvli zero, 
zero, e16, mf2, ta, ma ; CHECK-V-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-V-NEXT: ret entry: @@ -1109,12 +1109,12 @@ ; ; CHECK-V-LABEL: utest_f32i16: ; CHECK-V: # %bb.0: # %entry -; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-V-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-V-NEXT: lui a0, 16 ; CHECK-V-NEXT: addiw a0, a0, -1 ; CHECK-V-NEXT: vminu.vx v8, v8, a0 -; CHECK-V-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-V-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-V-NEXT: ret entry: @@ -1184,13 +1184,13 @@ ; ; CHECK-V-LABEL: ustest_f32i16: ; CHECK-V: # %bb.0: # %entry -; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-V-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-V-NEXT: lui a0, 16 ; CHECK-V-NEXT: addiw a0, a0, -1 ; CHECK-V-NEXT: vmin.vx v8, v8, a0 ; CHECK-V-NEXT: vmax.vx v8, v8, zero -; CHECK-V-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-V-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-V-NEXT: ret entry: @@ -1457,49 +1457,49 @@ ; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz ; CHECK-V-NEXT: sw a0, 4(sp) ; CHECK-V-NEXT: addi a0, sp, 28 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) ; CHECK-V-NEXT: mv a0, sp ; CHECK-V-NEXT: vle32.v v8, (a0) -; CHECK-V-NEXT: vsetivli zero, 2, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 2, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 1 ; CHECK-V-NEXT: addi a0, sp, 24 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 3, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 3, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 2 ; CHECK-V-NEXT: addi a0, sp, 20 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 4, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 3 ; CHECK-V-NEXT: addi a0, sp, 16 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 5, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 4 ; CHECK-V-NEXT: addi a0, sp, 12 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 6, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 6, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 5 ; CHECK-V-NEXT: addi a0, sp, 8 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 7, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 7, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 6 ; CHECK-V-NEXT: addi a0, sp, 4 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 7 ; CHECK-V-NEXT: lui a0, 8 ; CHECK-V-NEXT: addiw a0, a0, -1 -; CHECK-V-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; 
CHECK-V-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; CHECK-V-NEXT: vmin.vx v8, v8, a0 ; CHECK-V-NEXT: lui a0, 1048568 ; CHECK-V-NEXT: vmax.vx v10, v8, a0 -; CHECK-V-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-V-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-V-NEXT: ld ra, 88(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: ld s0, 80(sp) # 8-byte Folded Reload @@ -1733,47 +1733,47 @@ ; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz ; CHECK-V-NEXT: sw a0, 4(sp) ; CHECK-V-NEXT: addi a0, sp, 28 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) ; CHECK-V-NEXT: mv a0, sp ; CHECK-V-NEXT: vle32.v v8, (a0) -; CHECK-V-NEXT: vsetivli zero, 2, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 2, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 1 ; CHECK-V-NEXT: addi a0, sp, 24 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 3, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 3, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 2 ; CHECK-V-NEXT: addi a0, sp, 20 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 4, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 3 ; CHECK-V-NEXT: addi a0, sp, 16 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 5, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 4 ; CHECK-V-NEXT: addi a0, sp, 12 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 6, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 6, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 5 ; CHECK-V-NEXT: addi a0, sp, 8 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 7, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 7, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 6 ; CHECK-V-NEXT: addi a0, sp, 4 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 7 ; CHECK-V-NEXT: lui a0, 16 ; CHECK-V-NEXT: addiw a0, a0, -1 -; CHECK-V-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; CHECK-V-NEXT: vminu.vx v10, v8, a0 -; CHECK-V-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-V-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-V-NEXT: ld ra, 88(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: ld s0, 80(sp) # 8-byte Folded Reload @@ -2045,48 +2045,48 @@ ; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz ; CHECK-V-NEXT: sw a0, 4(sp) ; CHECK-V-NEXT: addi a0, sp, 28 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) ; CHECK-V-NEXT: mv a0, sp ; CHECK-V-NEXT: vle32.v v8, (a0) -; CHECK-V-NEXT: vsetivli zero, 2, e32, m2, tu, mu +; 
CHECK-V-NEXT: vsetivli zero, 2, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 1 ; CHECK-V-NEXT: addi a0, sp, 24 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 3, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 3, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 2 ; CHECK-V-NEXT: addi a0, sp, 20 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 4, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 3 ; CHECK-V-NEXT: addi a0, sp, 16 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 5, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 4 ; CHECK-V-NEXT: addi a0, sp, 12 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 6, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 6, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 5 ; CHECK-V-NEXT: addi a0, sp, 8 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 7, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 7, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 6 ; CHECK-V-NEXT: addi a0, sp, 4 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 7 ; CHECK-V-NEXT: lui a0, 16 ; CHECK-V-NEXT: addiw a0, a0, -1 -; CHECK-V-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; CHECK-V-NEXT: vmin.vx v8, v8, a0 ; CHECK-V-NEXT: vmax.vx v10, v8, zero -; CHECK-V-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-V-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-V-NEXT: ld ra, 88(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: ld s0, 80(sp) # 8-byte Folded Reload @@ -2205,13 +2205,13 @@ ; CHECK-V-NEXT: sub sp, sp, a0 ; CHECK-V-NEXT: addi a0, sp, 48 ; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill -; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vslidedown.vi v9, v8, 1 ; CHECK-V-NEXT: vfmv.f.s fa0, v9 ; CHECK-V-NEXT: call __fixdfti@plt ; CHECK-V-NEXT: mv s0, a0 ; CHECK-V-NEXT: mv s1, a1 -; CHECK-V-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; CHECK-V-NEXT: addi a0, sp, 48 ; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-V-NEXT: vfmv.f.s fa0, v8 @@ -2270,11 +2270,11 @@ ; CHECK-V-NEXT: sd a0, 24(sp) ; CHECK-V-NEXT: sd s0, 32(sp) ; CHECK-V-NEXT: addi a0, sp, 24 -; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vle64.v v8, (a0) ; CHECK-V-NEXT: addi a0, sp, 32 ; CHECK-V-NEXT: vle64.v v9, (a0) -; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, tu, mu +; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v9, 1 ; CHECK-V-NEXT: csrr 
a0, vlenb ; CHECK-V-NEXT: slli a0, a0, 1 @@ -2345,13 +2345,13 @@ ; CHECK-V-NEXT: sub sp, sp, a0 ; CHECK-V-NEXT: addi a0, sp, 48 ; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill -; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vslidedown.vi v9, v8, 1 ; CHECK-V-NEXT: vfmv.f.s fa0, v9 ; CHECK-V-NEXT: call __fixunsdfti@plt ; CHECK-V-NEXT: mv s0, a0 ; CHECK-V-NEXT: mv s1, a1 -; CHECK-V-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; CHECK-V-NEXT: addi a0, sp, 48 ; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-V-NEXT: vfmv.f.s fa0, v8 @@ -2367,11 +2367,11 @@ ; CHECK-V-NEXT: sd a0, 24(sp) ; CHECK-V-NEXT: sd s0, 32(sp) ; CHECK-V-NEXT: addi a0, sp, 24 -; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vle64.v v8, (a0) ; CHECK-V-NEXT: addi a0, sp, 32 ; CHECK-V-NEXT: vle64.v v9, (a0) -; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, tu, mu +; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v9, 1 ; CHECK-V-NEXT: csrr a0, vlenb ; CHECK-V-NEXT: slli a0, a0, 1 @@ -2476,13 +2476,13 @@ ; CHECK-V-NEXT: sub sp, sp, a0 ; CHECK-V-NEXT: addi a0, sp, 48 ; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill -; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vslidedown.vi v9, v8, 1 ; CHECK-V-NEXT: vfmv.f.s fa0, v9 ; CHECK-V-NEXT: call __fixdfti@plt ; CHECK-V-NEXT: mv s0, a0 ; CHECK-V-NEXT: mv s1, a1 -; CHECK-V-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; CHECK-V-NEXT: addi a0, sp, 48 ; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-V-NEXT: vfmv.f.s fa0, v8 @@ -2528,11 +2528,11 @@ ; CHECK-V-NEXT: sd a0, 24(sp) ; CHECK-V-NEXT: sd s0, 32(sp) ; CHECK-V-NEXT: addi a0, sp, 24 -; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vle64.v v8, (a0) ; CHECK-V-NEXT: addi a0, sp, 32 ; CHECK-V-NEXT: vle64.v v9, (a0) -; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, tu, mu +; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v9, 1 ; CHECK-V-NEXT: csrr a0, vlenb ; CHECK-V-NEXT: slli a0, a0, 1 @@ -2647,13 +2647,13 @@ ; CHECK-V-NEXT: sub sp, sp, a0 ; CHECK-V-NEXT: addi a0, sp, 48 ; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vslidedown.vi v9, v8, 1 ; CHECK-V-NEXT: vfmv.f.s fa0, v9 ; CHECK-V-NEXT: call __fixsfti@plt ; CHECK-V-NEXT: mv s0, a0 ; CHECK-V-NEXT: mv s1, a1 -; CHECK-V-NEXT: vsetivli zero, 0, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 0, e32, mf2, ta, ma ; CHECK-V-NEXT: addi a0, sp, 48 ; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-V-NEXT: vfmv.f.s fa0, v8 @@ -2712,11 +2712,11 @@ ; CHECK-V-NEXT: sd a0, 24(sp) ; CHECK-V-NEXT: sd s0, 32(sp) ; CHECK-V-NEXT: addi a0, sp, 24 -; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vle64.v v8, (a0) ; CHECK-V-NEXT: addi a0, sp, 32 ; CHECK-V-NEXT: vle64.v v9, (a0) -; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, tu, mu +; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v9, 1 ; CHECK-V-NEXT: csrr a0, vlenb ; CHECK-V-NEXT: slli a0, a0, 1 @@ -2787,13 +2787,13 @@ ; CHECK-V-NEXT: 
sub sp, sp, a0 ; CHECK-V-NEXT: addi a0, sp, 48 ; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vslidedown.vi v9, v8, 1 ; CHECK-V-NEXT: vfmv.f.s fa0, v9 ; CHECK-V-NEXT: call __fixunssfti@plt ; CHECK-V-NEXT: mv s0, a0 ; CHECK-V-NEXT: mv s1, a1 -; CHECK-V-NEXT: vsetivli zero, 0, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 0, e32, mf2, ta, ma ; CHECK-V-NEXT: addi a0, sp, 48 ; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-V-NEXT: vfmv.f.s fa0, v8 @@ -2809,11 +2809,11 @@ ; CHECK-V-NEXT: sd a0, 24(sp) ; CHECK-V-NEXT: sd s0, 32(sp) ; CHECK-V-NEXT: addi a0, sp, 24 -; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vle64.v v8, (a0) ; CHECK-V-NEXT: addi a0, sp, 32 ; CHECK-V-NEXT: vle64.v v9, (a0) -; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, tu, mu +; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v9, 1 ; CHECK-V-NEXT: csrr a0, vlenb ; CHECK-V-NEXT: slli a0, a0, 1 @@ -2918,13 +2918,13 @@ ; CHECK-V-NEXT: sub sp, sp, a0 ; CHECK-V-NEXT: addi a0, sp, 48 ; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vslidedown.vi v9, v8, 1 ; CHECK-V-NEXT: vfmv.f.s fa0, v9 ; CHECK-V-NEXT: call __fixsfti@plt ; CHECK-V-NEXT: mv s0, a0 ; CHECK-V-NEXT: mv s1, a1 -; CHECK-V-NEXT: vsetivli zero, 0, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 0, e32, mf2, ta, ma ; CHECK-V-NEXT: addi a0, sp, 48 ; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-V-NEXT: vfmv.f.s fa0, v8 @@ -2970,11 +2970,11 @@ ; CHECK-V-NEXT: sd a0, 24(sp) ; CHECK-V-NEXT: sd s0, 32(sp) ; CHECK-V-NEXT: addi a0, sp, 24 -; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vle64.v v8, (a0) ; CHECK-V-NEXT: addi a0, sp, 32 ; CHECK-V-NEXT: vle64.v v9, (a0) -; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, tu, mu +; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v9, 1 ; CHECK-V-NEXT: csrr a0, vlenb ; CHECK-V-NEXT: slli a0, a0, 1 @@ -3150,11 +3150,11 @@ ; CHECK-V-NEXT: sd a0, 8(sp) ; CHECK-V-NEXT: sd s0, 0(sp) ; CHECK-V-NEXT: addi a0, sp, 8 -; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vle64.v v9, (a0) ; CHECK-V-NEXT: mv a0, sp ; CHECK-V-NEXT: vle64.v v8, (a0) -; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, tu, mu +; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v9, 1 ; CHECK-V-NEXT: ld ra, 40(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: ld s0, 32(sp) # 8-byte Folded Reload @@ -3242,11 +3242,11 @@ ; CHECK-V-NEXT: sd s0, 8(sp) ; CHECK-V-NEXT: sd a0, 0(sp) ; CHECK-V-NEXT: addi a0, sp, 8 -; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vle64.v v9, (a0) ; CHECK-V-NEXT: mv a0, sp ; CHECK-V-NEXT: vle64.v v8, (a0) -; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, tu, mu +; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v9, 1 ; CHECK-V-NEXT: ld ra, 40(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: ld s0, 32(sp) # 8-byte Folded Reload @@ -3404,11 +3404,11 @@ ; CHECK-V-NEXT: sd s0, 8(sp) ; CHECK-V-NEXT: sd a0, 0(sp) ; CHECK-V-NEXT: addi a0, sp, 8 -; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; 
CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vle64.v v9, (a0) ; CHECK-V-NEXT: mv a0, sp ; CHECK-V-NEXT: vle64.v v8, (a0) -; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, tu, mu +; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v9, 1 ; CHECK-V-NEXT: ld ra, 40(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: ld s0, 32(sp) # 8-byte Folded Reload @@ -3461,13 +3461,13 @@ ; ; CHECK-V-LABEL: stest_f64i32_mm: ; CHECK-V: # %bb.0: # %entry -; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-V-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-V-NEXT: lui a0, 524288 ; CHECK-V-NEXT: addiw a1, a0, -1 ; CHECK-V-NEXT: vmin.vx v8, v8, a1 ; CHECK-V-NEXT: vmax.vx v8, v8, a0 -; CHECK-V-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-V-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-V-NEXT: ret entry: @@ -3499,12 +3499,12 @@ ; ; CHECK-V-LABEL: utest_f64i32_mm: ; CHECK-V: # %bb.0: # %entry -; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-V-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-V-NEXT: li a0, -1 ; CHECK-V-NEXT: srli a0, a0, 32 ; CHECK-V-NEXT: vminu.vx v8, v8, a0 -; CHECK-V-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-V-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-V-NEXT: ret entry: @@ -3545,13 +3545,13 @@ ; ; CHECK-V-LABEL: ustest_f64i32_mm: ; CHECK-V: # %bb.0: # %entry -; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-V-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-V-NEXT: li a0, -1 ; CHECK-V-NEXT: srli a0, a0, 32 ; CHECK-V-NEXT: vmin.vx v8, v8, a0 ; CHECK-V-NEXT: vmax.vx v8, v8, zero -; CHECK-V-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-V-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-V-NEXT: ret entry: @@ -3621,14 +3621,14 @@ ; ; CHECK-V-LABEL: stest_f32i32_mm: ; CHECK-V: # %bb.0: # %entry -; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-V-NEXT: vfwcvt.rtz.x.f.v v10, v8 ; CHECK-V-NEXT: lui a0, 524288 ; CHECK-V-NEXT: addiw a1, a0, -1 -; CHECK-V-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; CHECK-V-NEXT: vmin.vx v8, v10, a1 ; CHECK-V-NEXT: vmax.vx v10, v8, a0 -; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-V-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-V-NEXT: ret entry: @@ -3678,13 +3678,13 @@ ; ; CHECK-V-LABEL: utest_f32i32_mm: ; CHECK-V: # %bb.0: # %entry -; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-V-NEXT: vfwcvt.rtz.xu.f.v v10, v8 ; CHECK-V-NEXT: li a0, -1 ; CHECK-V-NEXT: srli a0, a0, 32 -; CHECK-V-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; CHECK-V-NEXT: vminu.vx v10, v10, a0 -; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-V-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-V-NEXT: ret entry: @@ -3753,14 +3753,14 @@ ; ; CHECK-V-LABEL: ustest_f32i32_mm: ; CHECK-V: # %bb.0: # %entry -; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-V-NEXT: vfwcvt.rtz.x.f.v v10, v8 ; CHECK-V-NEXT: li a0, -1 ; CHECK-V-NEXT: srli a0, a0, 32 -; CHECK-V-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-V-NEXT: 
vsetvli zero, zero, e64, m2, ta, ma ; CHECK-V-NEXT: vmin.vx v8, v10, a0 ; CHECK-V-NEXT: vmax.vx v10, v8, zero -; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-V-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-V-NEXT: ret entry: @@ -3903,28 +3903,28 @@ ; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz ; CHECK-V-NEXT: sd a0, 8(sp) ; CHECK-V-NEXT: addi a0, sp, 24 -; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vle64.v v8, (a0) ; CHECK-V-NEXT: mv a0, sp ; CHECK-V-NEXT: vle64.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 2, e64, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 2, e64, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v10, v8, 1 ; CHECK-V-NEXT: addi a0, sp, 16 -; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vle64.v v8, (a0) -; CHECK-V-NEXT: vsetivli zero, 3, e64, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 3, e64, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v10, v8, 2 ; CHECK-V-NEXT: addi a0, sp, 8 -; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vle64.v v8, (a0) -; CHECK-V-NEXT: vsetivli zero, 4, e64, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 4, e64, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v10, v8, 3 ; CHECK-V-NEXT: lui a0, 524288 ; CHECK-V-NEXT: addiw a1, a0, -1 -; CHECK-V-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; CHECK-V-NEXT: vmin.vx v8, v10, a1 ; CHECK-V-NEXT: vmax.vx v10, v8, a0 -; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-V-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-V-NEXT: ld ra, 56(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: ld s0, 48(sp) # 8-byte Folded Reload @@ -4052,27 +4052,27 @@ ; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz ; CHECK-V-NEXT: sd a0, 8(sp) ; CHECK-V-NEXT: addi a0, sp, 24 -; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vle64.v v8, (a0) ; CHECK-V-NEXT: mv a0, sp ; CHECK-V-NEXT: vle64.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 2, e64, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 2, e64, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v10, v8, 1 ; CHECK-V-NEXT: addi a0, sp, 16 -; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vle64.v v8, (a0) -; CHECK-V-NEXT: vsetivli zero, 3, e64, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 3, e64, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v10, v8, 2 ; CHECK-V-NEXT: addi a0, sp, 8 -; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vle64.v v8, (a0) -; CHECK-V-NEXT: vsetivli zero, 4, e64, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 4, e64, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v10, v8, 3 ; CHECK-V-NEXT: li a0, -1 ; CHECK-V-NEXT: srli a0, a0, 32 -; CHECK-V-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; CHECK-V-NEXT: vminu.vx v10, v10, a0 -; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-V-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-V-NEXT: ld ra, 56(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: ld s0, 48(sp) # 8-byte Folded Reload @@ -4219,28 +4219,28 @@ ; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz ; CHECK-V-NEXT: sd a0, 8(sp) ; CHECK-V-NEXT: addi a0, sp, 24 -; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; 
CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vle64.v v8, (a0) ; CHECK-V-NEXT: mv a0, sp ; CHECK-V-NEXT: vle64.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 2, e64, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 2, e64, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v10, v8, 1 ; CHECK-V-NEXT: addi a0, sp, 16 -; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vle64.v v8, (a0) -; CHECK-V-NEXT: vsetivli zero, 3, e64, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 3, e64, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v10, v8, 2 ; CHECK-V-NEXT: addi a0, sp, 8 -; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vle64.v v8, (a0) -; CHECK-V-NEXT: vsetivli zero, 4, e64, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 4, e64, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v10, v8, 3 ; CHECK-V-NEXT: li a0, -1 ; CHECK-V-NEXT: srli a0, a0, 32 -; CHECK-V-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; CHECK-V-NEXT: vmin.vx v8, v10, a0 ; CHECK-V-NEXT: vmax.vx v10, v8, zero -; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-V-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-V-NEXT: ld ra, 56(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: ld s0, 48(sp) # 8-byte Folded Reload @@ -4291,14 +4291,14 @@ ; ; CHECK-V-LABEL: stest_f64i16_mm: ; CHECK-V: # %bb.0: # %entry -; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-V-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-V-NEXT: lui a0, 8 ; CHECK-V-NEXT: addiw a0, a0, -1 ; CHECK-V-NEXT: vmin.vx v8, v9, a0 ; CHECK-V-NEXT: lui a0, 1048568 ; CHECK-V-NEXT: vmax.vx v8, v8, a0 -; CHECK-V-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-V-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-V-NEXT: ret entry: @@ -4330,12 +4330,12 @@ ; ; CHECK-V-LABEL: utest_f64i16_mm: ; CHECK-V: # %bb.0: # %entry -; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-V-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-V-NEXT: lui a0, 16 ; CHECK-V-NEXT: addiw a0, a0, -1 ; CHECK-V-NEXT: vminu.vx v8, v9, a0 -; CHECK-V-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-V-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-V-NEXT: ret entry: @@ -4376,13 +4376,13 @@ ; ; CHECK-V-LABEL: ustest_f64i16_mm: ; CHECK-V: # %bb.0: # %entry -; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-V-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-V-NEXT: lui a0, 16 ; CHECK-V-NEXT: addiw a0, a0, -1 ; CHECK-V-NEXT: vmin.vx v8, v9, a0 ; CHECK-V-NEXT: vmax.vx v8, v8, zero -; CHECK-V-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-V-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-V-NEXT: ret entry: @@ -4454,14 +4454,14 @@ ; ; CHECK-V-LABEL: stest_f32i16_mm: ; CHECK-V: # %bb.0: # %entry -; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-V-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-V-NEXT: lui a0, 8 ; CHECK-V-NEXT: addiw a0, a0, -1 ; CHECK-V-NEXT: vmin.vx v8, v8, a0 ; CHECK-V-NEXT: lui a0, 1048568 ; CHECK-V-NEXT: vmax.vx v8, v8, a0 -; CHECK-V-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-V-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-V-NEXT: ret entry: @@ 
-4511,12 +4511,12 @@ ; ; CHECK-V-LABEL: utest_f32i16_mm: ; CHECK-V: # %bb.0: # %entry -; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-V-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-V-NEXT: lui a0, 16 ; CHECK-V-NEXT: addiw a0, a0, -1 ; CHECK-V-NEXT: vminu.vx v8, v8, a0 -; CHECK-V-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-V-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-V-NEXT: ret entry: @@ -4585,13 +4585,13 @@ ; ; CHECK-V-LABEL: ustest_f32i16_mm: ; CHECK-V: # %bb.0: # %entry -; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-V-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-V-NEXT: lui a0, 16 ; CHECK-V-NEXT: addiw a0, a0, -1 ; CHECK-V-NEXT: vmin.vx v8, v8, a0 ; CHECK-V-NEXT: vmax.vx v8, v8, zero -; CHECK-V-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-V-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-V-NEXT: ret entry: @@ -4856,49 +4856,49 @@ ; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz ; CHECK-V-NEXT: sw a0, 4(sp) ; CHECK-V-NEXT: addi a0, sp, 28 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) ; CHECK-V-NEXT: mv a0, sp ; CHECK-V-NEXT: vle32.v v8, (a0) -; CHECK-V-NEXT: vsetivli zero, 2, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 2, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 1 ; CHECK-V-NEXT: addi a0, sp, 24 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 3, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 3, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 2 ; CHECK-V-NEXT: addi a0, sp, 20 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 4, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 3 ; CHECK-V-NEXT: addi a0, sp, 16 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 5, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 4 ; CHECK-V-NEXT: addi a0, sp, 12 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 6, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 6, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 5 ; CHECK-V-NEXT: addi a0, sp, 8 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 7, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 7, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 6 ; CHECK-V-NEXT: addi a0, sp, 4 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 7 ; CHECK-V-NEXT: lui a0, 8 ; CHECK-V-NEXT: addiw a0, a0, -1 -; CHECK-V-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; CHECK-V-NEXT: vmin.vx v8, v8, a0 ; 
CHECK-V-NEXT: lui a0, 1048568 ; CHECK-V-NEXT: vmax.vx v10, v8, a0 -; CHECK-V-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-V-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-V-NEXT: ld ra, 88(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: ld s0, 80(sp) # 8-byte Folded Reload @@ -5128,47 +5128,47 @@ ; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz ; CHECK-V-NEXT: sw a0, 4(sp) ; CHECK-V-NEXT: addi a0, sp, 28 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) ; CHECK-V-NEXT: mv a0, sp ; CHECK-V-NEXT: vle32.v v8, (a0) -; CHECK-V-NEXT: vsetivli zero, 2, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 2, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 1 ; CHECK-V-NEXT: addi a0, sp, 24 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 3, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 3, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 2 ; CHECK-V-NEXT: addi a0, sp, 20 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 4, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 3 ; CHECK-V-NEXT: addi a0, sp, 16 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 5, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 4 ; CHECK-V-NEXT: addi a0, sp, 12 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 6, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 6, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 5 ; CHECK-V-NEXT: addi a0, sp, 8 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 7, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 7, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 6 ; CHECK-V-NEXT: addi a0, sp, 4 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 7 ; CHECK-V-NEXT: lui a0, 16 ; CHECK-V-NEXT: addiw a0, a0, -1 -; CHECK-V-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; CHECK-V-NEXT: vminu.vx v10, v8, a0 -; CHECK-V-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-V-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-V-NEXT: ld ra, 88(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: ld s0, 80(sp) # 8-byte Folded Reload @@ -5439,48 +5439,48 @@ ; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz ; CHECK-V-NEXT: sw a0, 4(sp) ; CHECK-V-NEXT: addi a0, sp, 28 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) ; CHECK-V-NEXT: mv a0, sp ; CHECK-V-NEXT: vle32.v v8, (a0) -; CHECK-V-NEXT: vsetivli zero, 2, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 2, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 1 ; 
CHECK-V-NEXT: addi a0, sp, 24 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 3, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 3, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 2 ; CHECK-V-NEXT: addi a0, sp, 20 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 4, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 3 ; CHECK-V-NEXT: addi a0, sp, 16 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 5, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 4 ; CHECK-V-NEXT: addi a0, sp, 12 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 6, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 6, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 5 ; CHECK-V-NEXT: addi a0, sp, 8 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 7, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 7, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 6 ; CHECK-V-NEXT: addi a0, sp, 4 -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vle32.v v10, (a0) -; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, tu, mu +; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v10, 7 ; CHECK-V-NEXT: lui a0, 16 ; CHECK-V-NEXT: addiw a0, a0, -1 -; CHECK-V-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; CHECK-V-NEXT: vmin.vx v8, v8, a0 ; CHECK-V-NEXT: vmax.vx v10, v8, zero -; CHECK-V-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-V-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-V-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-V-NEXT: ld ra, 88(sp) # 8-byte Folded Reload ; CHECK-V-NEXT: ld s0, 80(sp) # 8-byte Folded Reload @@ -5625,13 +5625,13 @@ ; CHECK-V-NEXT: sub sp, sp, a0 ; CHECK-V-NEXT: addi a0, sp, 48 ; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill -; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vslidedown.vi v9, v8, 1 ; CHECK-V-NEXT: vfmv.f.s fa0, v9 ; CHECK-V-NEXT: call __fixdfti@plt ; CHECK-V-NEXT: mv s0, a0 ; CHECK-V-NEXT: mv s1, a1 -; CHECK-V-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; CHECK-V-NEXT: addi a0, sp, 48 ; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-V-NEXT: vfmv.f.s fa0, v8 @@ -5678,11 +5678,11 @@ ; CHECK-V-NEXT: sd a0, 24(sp) ; CHECK-V-NEXT: sd s0, 32(sp) ; CHECK-V-NEXT: addi a0, sp, 24 -; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vle64.v v8, (a0) ; CHECK-V-NEXT: addi a0, sp, 32 ; CHECK-V-NEXT: vle64.v v9, (a0) -; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, tu, mu +; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v9, 1 ; CHECK-V-NEXT: csrr a0, vlenb ; CHECK-V-NEXT: slli a0, a0, 1 @@ -5806,13 +5806,13 @@ ; CHECK-V-NEXT: sub sp, 
sp, a0 ; CHECK-V-NEXT: addi a0, sp, 48 ; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill -; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vslidedown.vi v9, v8, 1 ; CHECK-V-NEXT: vfmv.f.s fa0, v9 ; CHECK-V-NEXT: call __fixunsdfti@plt ; CHECK-V-NEXT: mv s0, a0 ; CHECK-V-NEXT: mv s1, a1 -; CHECK-V-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; CHECK-V-NEXT: addi a0, sp, 48 ; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-V-NEXT: vfmv.f.s fa0, v8 @@ -5835,11 +5835,11 @@ ; CHECK-V-NEXT: sd a2, 24(sp) ; CHECK-V-NEXT: sd a3, 32(sp) ; CHECK-V-NEXT: addi a0, sp, 24 -; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vle64.v v8, (a0) ; CHECK-V-NEXT: addi a0, sp, 32 ; CHECK-V-NEXT: vle64.v v9, (a0) -; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, tu, mu +; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v9, 1 ; CHECK-V-NEXT: csrr a0, vlenb ; CHECK-V-NEXT: slli a0, a0, 1 @@ -5967,13 +5967,13 @@ ; CHECK-V-NEXT: sub sp, sp, a0 ; CHECK-V-NEXT: addi a0, sp, 48 ; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill -; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vslidedown.vi v9, v8, 1 ; CHECK-V-NEXT: vfmv.f.s fa0, v9 ; CHECK-V-NEXT: call __fixdfti@plt ; CHECK-V-NEXT: mv s1, a0 ; CHECK-V-NEXT: mv s0, a1 -; CHECK-V-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; CHECK-V-NEXT: addi a0, sp, 48 ; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-V-NEXT: vfmv.f.s fa0, v8 @@ -6009,11 +6009,11 @@ ; CHECK-V-NEXT: sd a4, 24(sp) ; CHECK-V-NEXT: sd a3, 32(sp) ; CHECK-V-NEXT: addi a0, sp, 24 -; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vle64.v v8, (a0) ; CHECK-V-NEXT: addi a0, sp, 32 ; CHECK-V-NEXT: vle64.v v9, (a0) -; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, tu, mu +; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v9, 1 ; CHECK-V-NEXT: csrr a0, vlenb ; CHECK-V-NEXT: slli a0, a0, 1 @@ -6186,13 +6186,13 @@ ; CHECK-V-NEXT: sub sp, sp, a0 ; CHECK-V-NEXT: addi a0, sp, 48 ; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill -; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-V-NEXT: vslidedown.vi v9, v8, 1 ; CHECK-V-NEXT: vfmv.f.s fa0, v9 ; CHECK-V-NEXT: call __fixsfti@plt ; CHECK-V-NEXT: mv s0, a0 ; CHECK-V-NEXT: mv s1, a1 -; CHECK-V-NEXT: vsetivli zero, 0, e32, mf2, ta, mu +; CHECK-V-NEXT: vsetivli zero, 0, e32, mf2, ta, ma ; CHECK-V-NEXT: addi a0, sp, 48 ; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload ; CHECK-V-NEXT: vfmv.f.s fa0, v8 @@ -6239,11 +6239,11 @@ ; CHECK-V-NEXT: sd a0, 24(sp) ; CHECK-V-NEXT: sd s0, 32(sp) ; CHECK-V-NEXT: addi a0, sp, 24 -; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-V-NEXT: vle64.v v8, (a0) ; CHECK-V-NEXT: addi a0, sp, 32 ; CHECK-V-NEXT: vle64.v v9, (a0) -; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, tu, mu +; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, tu, ma ; CHECK-V-NEXT: vslideup.vi v8, v9, 1 ; CHECK-V-NEXT: csrr a0, vlenb ; CHECK-V-NEXT: slli a0, a0, 1 @@ -6367,13 +6367,13 @@ ; CHECK-V-NEXT: sub sp, sp, a0 ; CHECK-V-NEXT: addi a0, sp, 48 ; CHECK-V-NEXT: vs1r.v v8, (a0) # 
Unknown-size Folded Spill
-; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu
+; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: vslidedown.vi v9, v8, 1
; CHECK-V-NEXT: vfmv.f.s fa0, v9
; CHECK-V-NEXT: call __fixunssfti@plt
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
-; CHECK-V-NEXT: vsetivli zero, 0, e32, mf2, ta, mu
+; CHECK-V-NEXT: vsetivli zero, 0, e32, mf2, ta, ma
; CHECK-V-NEXT: addi a0, sp, 48
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT: vfmv.f.s fa0, v8
@@ -6396,11 +6396,11 @@
; CHECK-V-NEXT: sd a2, 24(sp)
; CHECK-V-NEXT: sd a3, 32(sp)
; CHECK-V-NEXT: addi a0, sp, 24
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vle64.v v8, (a0)
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vle64.v v9, (a0)
-; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, tu, mu
+; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, tu, ma
; CHECK-V-NEXT: vslideup.vi v8, v9, 1
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
@@ -6528,13 +6528,13 @@
; CHECK-V-NEXT: sub sp, sp, a0
; CHECK-V-NEXT: addi a0, sp, 48
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu
+; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: vslidedown.vi v9, v8, 1
; CHECK-V-NEXT: vfmv.f.s fa0, v9
; CHECK-V-NEXT: call __fixsfti@plt
; CHECK-V-NEXT: mv s1, a0
; CHECK-V-NEXT: mv s0, a1
-; CHECK-V-NEXT: vsetivli zero, 0, e32, mf2, ta, mu
+; CHECK-V-NEXT: vsetivli zero, 0, e32, mf2, ta, ma
; CHECK-V-NEXT: addi a0, sp, 48
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT: vfmv.f.s fa0, v8
@@ -6570,11 +6570,11 @@
; CHECK-V-NEXT: sd a4, 24(sp)
; CHECK-V-NEXT: sd a3, 32(sp)
; CHECK-V-NEXT: addi a0, sp, 24
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vle64.v v8, (a0)
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vle64.v v9, (a0)
-; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, tu, mu
+; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, tu, ma
; CHECK-V-NEXT: vslideup.vi v8, v9, 1
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
@@ -6796,11 +6796,11 @@
; CHECK-V-NEXT: sd a0, 8(sp)
; CHECK-V-NEXT: sd s0, 0(sp)
; CHECK-V-NEXT: addi a0, sp, 8
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vle64.v v9, (a0)
; CHECK-V-NEXT: mv a0, sp
; CHECK-V-NEXT: vle64.v v8, (a0)
-; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, tu, mu
+; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, tu, ma
; CHECK-V-NEXT: vslideup.vi v8, v9, 1
; CHECK-V-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
@@ -6948,11 +6948,11 @@
; CHECK-V-NEXT: sd a2, 8(sp)
; CHECK-V-NEXT: sd a3, 0(sp)
; CHECK-V-NEXT: addi a0, sp, 8
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vle64.v v9, (a0)
; CHECK-V-NEXT: mv a0, sp
; CHECK-V-NEXT: vle64.v v8, (a0)
-; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, tu, mu
+; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, tu, ma
; CHECK-V-NEXT: vslideup.vi v8, v9, 1
; CHECK-V-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
@@ -7117,11 +7117,11 @@
; CHECK-V-NEXT: sd a3, 8(sp)
; CHECK-V-NEXT: sd a0, 0(sp)
; CHECK-V-NEXT: addi a0, sp, 8
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vle64.v v9, (a0)
; CHECK-V-NEXT: mv a0, sp
; CHECK-V-NEXT: vle64.v v8, (a0)
-; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, tu, mu
+; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, tu, ma
; CHECK-V-NEXT: vslideup.vi v8, v9, 1
; CHECK-V-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
--- a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
+++ b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
@@ -24,7 +24,7 @@
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: sub sp, sp, a0
; CHECK-NEXT: li a0, 55
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vloxseg2ei32.v v8, (a0), v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -35,7 +35,7 @@
; CHECK-NEXT: vs4r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: vs4r.v v12, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT: vmclr.m v0
; CHECK-NEXT: li s0, 36
; CHECK-NEXT: vsetvli zero, s0, e16, m4, tu, mu
@@ -49,7 +49,7 @@
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT: vrgather.vv v4, v8, v8, v0.t
-; CHECK-NEXT: vsetvli zero, s0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, s0, e16, m4, ta, ma
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, sp, a1
@@ -100,7 +100,7 @@
; SUBREGLIVENESS-NEXT: slli a0, a0, 4
; SUBREGLIVENESS-NEXT: sub sp, sp, a0
; SUBREGLIVENESS-NEXT: li a0, 55
-; SUBREGLIVENESS-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; SUBREGLIVENESS-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; SUBREGLIVENESS-NEXT: vloxseg2ei32.v v8, (a0), v8
; SUBREGLIVENESS-NEXT: csrr a0, vlenb
; SUBREGLIVENESS-NEXT: slli a0, a0, 3
@@ -111,7 +111,7 @@
; SUBREGLIVENESS-NEXT: vs4r.v v8, (a0) # Unknown-size Folded Spill
; SUBREGLIVENESS-NEXT: add a0, a0, a1
; SUBREGLIVENESS-NEXT: vs4r.v v12, (a0) # Unknown-size Folded Spill
-; SUBREGLIVENESS-NEXT: vsetvli a0, zero, e8, m2, ta, mu
+; SUBREGLIVENESS-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; SUBREGLIVENESS-NEXT: vmclr.m v0
; SUBREGLIVENESS-NEXT: li s0, 36
; SUBREGLIVENESS-NEXT: vsetvli zero, s0, e16, m4, tu, mu
@@ -122,7 +122,7 @@
; SUBREGLIVENESS-NEXT: li a0, 32
; SUBREGLIVENESS-NEXT: vsetvli zero, a0, e16, m4, tu, mu
; SUBREGLIVENESS-NEXT: vrgather.vv v16, v8, v8, v0.t
-; SUBREGLIVENESS-NEXT: vsetvli zero, s0, e16, m4, ta, mu
+; SUBREGLIVENESS-NEXT: vsetvli zero, s0, e16, m4, ta, ma
; SUBREGLIVENESS-NEXT: csrr a1, vlenb
; SUBREGLIVENESS-NEXT: slli a1, a1, 3
; SUBREGLIVENESS-NEXT: add a1, sp, a1
diff --git a/llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll
@@ -7,7 +7,7 @@
define @vabs_nxv1i16( %v) {
; CHECK-LABEL: vabs_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT: vrsub.vi v9, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
@@ -20,7 +20,7 @@
define @vabs_nxv2i16( %v) {
; CHECK-LABEL: vabs_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT: vrsub.vi v9, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v9
; CHECK-NEXT: ret
@@ -33,7 +33,7 @@
define
@vabs_nxv4i16( %v) { ; CHECK-LABEL: vabs_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vrsub.vi v9, v8, 0 ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: ret @@ -46,7 +46,7 @@ define @vabs_nxv8i16( %v) { ; CHECK-LABEL: vabs_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vrsub.vi v10, v8, 0 ; CHECK-NEXT: vmax.vv v8, v8, v10 ; CHECK-NEXT: ret @@ -59,7 +59,7 @@ define @vabs_nxv16i16( %v) { ; CHECK-LABEL: vabs_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vrsub.vi v12, v8, 0 ; CHECK-NEXT: vmax.vv v8, v8, v12 ; CHECK-NEXT: ret @@ -72,7 +72,7 @@ define @vabs_nxv32i16( %v) { ; CHECK-LABEL: vabs_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vrsub.vi v16, v8, 0 ; CHECK-NEXT: vmax.vv v8, v8, v16 ; CHECK-NEXT: ret @@ -85,7 +85,7 @@ define @vabs_nxv1i32( %v) { ; CHECK-LABEL: vabs_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vrsub.vi v9, v8, 0 ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: ret @@ -98,7 +98,7 @@ define @vabs_nxv2i32( %v) { ; CHECK-LABEL: vabs_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vrsub.vi v9, v8, 0 ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: ret @@ -111,7 +111,7 @@ define @vabs_nxv4i32( %v) { ; CHECK-LABEL: vabs_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vrsub.vi v10, v8, 0 ; CHECK-NEXT: vmax.vv v8, v8, v10 ; CHECK-NEXT: ret @@ -124,7 +124,7 @@ define @vabs_nxv8i32( %v) { ; CHECK-LABEL: vabs_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vrsub.vi v12, v8, 0 ; CHECK-NEXT: vmax.vv v8, v8, v12 ; CHECK-NEXT: ret @@ -137,7 +137,7 @@ define @vabs_nxv16i32( %v) { ; CHECK-LABEL: vabs_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vrsub.vi v16, v8, 0 ; CHECK-NEXT: vmax.vv v8, v8, v16 ; CHECK-NEXT: ret @@ -150,7 +150,7 @@ define @vabs_nxv1i64( %v) { ; CHECK-LABEL: vabs_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vrsub.vi v9, v8, 0 ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: ret @@ -163,7 +163,7 @@ define @vabs_nxv2i64( %v) { ; CHECK-LABEL: vabs_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vrsub.vi v10, v8, 0 ; CHECK-NEXT: vmax.vv v8, v8, v10 ; CHECK-NEXT: ret @@ -176,7 +176,7 @@ define @vabs_nxv4i64( %v) { ; CHECK-LABEL: vabs_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vrsub.vi v12, v8, 0 ; CHECK-NEXT: vmax.vv v8, v8, v12 ; CHECK-NEXT: ret @@ -189,7 +189,7 @@ define @vabs_nxv8i64( %v) { ; CHECK-LABEL: vabs_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vrsub.vi v16, v8, 0 ; CHECK-NEXT: 
vmax.vv v8, v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll b/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll --- a/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll @@ -41,7 +41,7 @@ ; RV64IV-NEXT: addi a0, sp, 528 ; RV64IV-NEXT: ld a1, 520(sp) ; RV64IV-NEXT: vl1re64.v v9, (a0) -; RV64IV-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64IV-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64IV-NEXT: vadd.vv v8, v8, v9 ; RV64IV-NEXT: csrr a0, vlenb ; RV64IV-NEXT: slli a0, a0, 1 diff --git a/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll b/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll --- a/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll @@ -4,7 +4,7 @@ define @get_lane_mask(ptr %p, i64 %index, i64 %tc) { ; CHECK-LABEL: get_lane_mask: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vsaddu.vx v8, v8, a1 ; CHECK-NEXT: vmsltu.vx v0, v8, a2 @@ -16,7 +16,7 @@ define @constant_zero_index(ptr %p, i64 %tc) { ; CHECK-LABEL: constant_zero_index: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vmsltu.vx v0, v8, a1 ; CHECK-NEXT: ret @@ -27,7 +27,7 @@ define @constant_nonzero_index(ptr %p, i64 %tc) { ; CHECK-LABEL: constant_nonzero_index: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: li a0, 24 ; CHECK-NEXT: vsaddu.vx v8, v8, a0 @@ -40,7 +40,7 @@ define @constant_tripcount(ptr %p, i64 %index) { ; CHECK-LABEL: constant_tripcount: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vsaddu.vx v8, v8, a1 ; CHECK-NEXT: li a0, 1024 @@ -53,7 +53,7 @@ define @constant_both(ptr %p) { ; CHECK-LABEL: constant_both: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: li a0, 1024 ; CHECK-NEXT: vmsltu.vx v0, v8, a0 @@ -66,7 +66,7 @@ define @above_maxvl(ptr %p) { ; CHECK-LABEL: above_maxvl: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: lui a0, 1 ; CHECK-NEXT: addiw a0, a0, -2048 @@ -79,7 +79,7 @@ define <2 x i1> @fv2(ptr %p, i64 %index, i64 %tc) { ; CHECK-LABEL: fv2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vsaddu.vx v8, v8, a1 ; CHECK-NEXT: vmsltu.vx v0, v8, a2 @@ -91,7 +91,7 @@ define <8 x i1> @fv8(ptr %p, i64 %index, i64 %tc) { ; CHECK-LABEL: fv8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vsaddu.vx v8, v8, a1 ; CHECK-NEXT: vmsltu.vx v0, v8, a2 @@ -105,14 +105,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI8_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI8_0) -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vsaddu.vx v8, v8, a1 ; CHECK-NEXT: vmsltu.vx v16, v8, a2 ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vsaddu.vx 
v8, v8, a1 ; CHECK-NEXT: vmsltu.vx v0, v8, a2 -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, tu, ma ; CHECK-NEXT: vslideup.vi v0, v16, 2 ; CHECK-NEXT: ret %mask = call <32 x i1> @llvm.get.active.lane.mask.v32i1.i64(i64 %index, i64 %tc) @@ -124,30 +124,30 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI9_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI9_0) -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vsaddu.vx v8, v8, a1 ; CHECK-NEXT: vmsltu.vx v16, v8, a2 ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vsaddu.vx v8, v8, a1 ; CHECK-NEXT: vmsltu.vx v0, v8, a2 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v0, v16, 2 ; CHECK-NEXT: lui a0, %hi(.LCPI9_1) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI9_1) -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vsaddu.vx v8, v8, a1 ; CHECK-NEXT: vmsltu.vx v16, v8, a2 -; CHECK-NEXT: vsetivli zero, 6, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 6, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v0, v16, 4 ; CHECK-NEXT: lui a0, %hi(.LCPI9_2) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI9_2) -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vsaddu.vx v8, v8, a1 ; CHECK-NEXT: vmsltu.vx v16, v8, a2 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v0, v16, 6 ; CHECK-NEXT: ret %mask = call <64 x i1> @llvm.get.active.lane.mask.v64i1.i64(i64 %index, i64 %tc) @@ -159,62 +159,62 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI10_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI10_0) -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vsaddu.vx v8, v8, a1 ; CHECK-NEXT: vmsltu.vx v16, v8, a2 ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vsaddu.vx v8, v8, a1 ; CHECK-NEXT: vmsltu.vx v0, v8, a2 -; CHECK-NEXT: vsetivli zero, 4, e8, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, m1, tu, ma ; CHECK-NEXT: vslideup.vi v0, v16, 2 ; CHECK-NEXT: lui a0, %hi(.LCPI10_1) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI10_1) -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vsaddu.vx v8, v8, a1 ; CHECK-NEXT: vmsltu.vx v16, v8, a2 -; CHECK-NEXT: vsetivli zero, 6, e8, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 6, e8, m1, tu, ma ; CHECK-NEXT: vslideup.vi v0, v16, 4 ; CHECK-NEXT: lui a0, %hi(.LCPI10_2) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI10_2) -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vsaddu.vx v8, v8, a1 ; CHECK-NEXT: vmsltu.vx v16, v8, a2 -; CHECK-NEXT: vsetivli zero, 8, e8, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 8, e8, m1, tu, ma ; CHECK-NEXT: vslideup.vi v0, v16, 6 ; CHECK-NEXT: lui a0, %hi(.LCPI10_3) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI10_3) -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vsaddu.vx v8, v8, a1 ; CHECK-NEXT: vmsltu.vx v16, v8, a2 -; CHECK-NEXT: vsetivli zero, 10, e8, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 10, e8, m1, tu, ma ; CHECK-NEXT: vslideup.vi v0, v16, 8 ; 
CHECK-NEXT: lui a0, %hi(.LCPI10_4) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI10_4) -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vsaddu.vx v8, v8, a1 ; CHECK-NEXT: vmsltu.vx v16, v8, a2 -; CHECK-NEXT: vsetivli zero, 12, e8, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 12, e8, m1, tu, ma ; CHECK-NEXT: vslideup.vi v0, v16, 10 ; CHECK-NEXT: lui a0, %hi(.LCPI10_5) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI10_5) -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vsaddu.vx v8, v8, a1 ; CHECK-NEXT: vmsltu.vx v16, v8, a2 -; CHECK-NEXT: vsetivli zero, 14, e8, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 14, e8, m1, tu, ma ; CHECK-NEXT: vslideup.vi v0, v16, 12 ; CHECK-NEXT: lui a0, %hi(.LCPI10_6) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI10_6) -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vsaddu.vx v8, v8, a1 ; CHECK-NEXT: vmsltu.vx v16, v8, a2 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, ma ; CHECK-NEXT: vslideup.vi v0, v16, 14 ; CHECK-NEXT: ret %mask = call <128 x i1> @llvm.get.active.lane.mask.v128i1.i64(i64 %index, i64 %tc) diff --git a/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir b/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir --- a/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir +++ b/llvm/test/CodeGen/RISCV/rvv/addi-scalable-offset.mir @@ -40,7 +40,7 @@ ; CHECK-NEXT: $x12 = frame-setup PseudoReadVLENB ; CHECK-NEXT: $x12 = frame-setup SLLI killed $x12, 1 ; CHECK-NEXT: $x2 = frame-setup SUB $x2, killed $x12 - ; CHECK-NEXT: dead $x0 = PseudoVSETVLI killed renamable $x11, 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETVLI killed renamable $x11, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: renamable $v8 = PseudoVLE64_V_M1 killed renamable $x10, $noreg, 6 /* e64 */, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8) ; CHECK-NEXT: $x10 = PseudoReadVLENB ; CHECK-NEXT: $x10 = SLLI killed $x10, 1 diff --git a/llvm/test/CodeGen/RISCV/rvv/allone-masked-to-unmasked.ll b/llvm/test/CodeGen/RISCV/rvv/allone-masked-to-unmasked.ll --- a/llvm/test/CodeGen/RISCV/rvv/allone-masked-to-unmasked.ll +++ b/llvm/test/CodeGen/RISCV/rvv/allone-masked-to-unmasked.ll @@ -15,7 +15,7 @@ define @test0( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: test0: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -35,7 +35,7 @@ define @test1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: test1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -55,7 +55,7 @@ define @test2( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: test2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -75,7 +75,7 @@ define @test3( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: test3: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: 
vadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll @@ -5,7 +5,7 @@ define @bitreverse_nxv1i8( %va) { ; CHECK-LABEL: bitreverse_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vand.vi v9, v8, 15 ; CHECK-NEXT: vsll.vi v9, v9, 4 ; CHECK-NEXT: vsrl.vi v8, v8, 4 @@ -32,7 +32,7 @@ define @bitreverse_nxv2i8( %va) { ; CHECK-LABEL: bitreverse_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vand.vi v9, v8, 15 ; CHECK-NEXT: vsll.vi v9, v9, 4 ; CHECK-NEXT: vsrl.vi v8, v8, 4 @@ -59,7 +59,7 @@ define @bitreverse_nxv4i8( %va) { ; CHECK-LABEL: bitreverse_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vand.vi v9, v8, 15 ; CHECK-NEXT: vsll.vi v9, v9, 4 ; CHECK-NEXT: vsrl.vi v8, v8, 4 @@ -86,7 +86,7 @@ define @bitreverse_nxv8i8( %va) { ; CHECK-LABEL: bitreverse_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vand.vi v9, v8, 15 ; CHECK-NEXT: vsll.vi v9, v9, 4 ; CHECK-NEXT: vsrl.vi v8, v8, 4 @@ -113,7 +113,7 @@ define @bitreverse_nxv16i8( %va) { ; CHECK-LABEL: bitreverse_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vand.vi v10, v8, 15 ; CHECK-NEXT: vsll.vi v10, v10, 4 ; CHECK-NEXT: vsrl.vi v8, v8, 4 @@ -140,7 +140,7 @@ define @bitreverse_nxv32i8( %va) { ; CHECK-LABEL: bitreverse_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vand.vi v12, v8, 15 ; CHECK-NEXT: vsll.vi v12, v12, 4 ; CHECK-NEXT: vsrl.vi v8, v8, 4 @@ -167,7 +167,7 @@ define @bitreverse_nxv64i8( %va) { ; CHECK-LABEL: bitreverse_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vand.vi v16, v8, 15 ; CHECK-NEXT: vsll.vi v16, v16, 4 ; CHECK-NEXT: vsrl.vi v8, v8, 4 @@ -194,7 +194,7 @@ define @bitreverse_nxv1i16( %va) { ; RV32-LABEL: bitreverse_nxv1i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; RV32-NEXT: vsrl.vi v9, v8, 8 ; RV32-NEXT: vsll.vi v8, v8, 8 ; RV32-NEXT: vor.vv v8, v8, v9 @@ -223,7 +223,7 @@ ; ; RV64-LABEL: bitreverse_nxv1i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; RV64-NEXT: vsrl.vi v9, v8, 8 ; RV64-NEXT: vsll.vi v8, v8, 8 ; RV64-NEXT: vor.vv v8, v8, v9 @@ -257,7 +257,7 @@ define @bitreverse_nxv2i16( %va) { ; RV32-LABEL: bitreverse_nxv2i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; RV32-NEXT: vsrl.vi v9, v8, 8 ; RV32-NEXT: vsll.vi v8, v8, 8 ; RV32-NEXT: vor.vv v8, v8, v9 @@ -286,7 +286,7 @@ ; ; RV64-LABEL: bitreverse_nxv2i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; RV64-NEXT: vsrl.vi v9, v8, 8 ; RV64-NEXT: vsll.vi v8, v8, 8 ; RV64-NEXT: vor.vv v8, v8, v9 @@ -320,7 +320,7 @@ define @bitreverse_nxv4i16( %va) { ; 
RV32-LABEL: bitreverse_nxv4i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; RV32-NEXT: vsrl.vi v9, v8, 8 ; RV32-NEXT: vsll.vi v8, v8, 8 ; RV32-NEXT: vor.vv v8, v8, v9 @@ -349,7 +349,7 @@ ; ; RV64-LABEL: bitreverse_nxv4i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; RV64-NEXT: vsrl.vi v9, v8, 8 ; RV64-NEXT: vsll.vi v8, v8, 8 ; RV64-NEXT: vor.vv v8, v8, v9 @@ -383,7 +383,7 @@ define @bitreverse_nxv8i16( %va) { ; RV32-LABEL: bitreverse_nxv8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; RV32-NEXT: vsrl.vi v10, v8, 8 ; RV32-NEXT: vsll.vi v8, v8, 8 ; RV32-NEXT: vor.vv v8, v8, v10 @@ -412,7 +412,7 @@ ; ; RV64-LABEL: bitreverse_nxv8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; RV64-NEXT: vsrl.vi v10, v8, 8 ; RV64-NEXT: vsll.vi v8, v8, 8 ; RV64-NEXT: vor.vv v8, v8, v10 @@ -446,7 +446,7 @@ define @bitreverse_nxv16i16( %va) { ; RV32-LABEL: bitreverse_nxv16i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; RV32-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; RV32-NEXT: vsrl.vi v12, v8, 8 ; RV32-NEXT: vsll.vi v8, v8, 8 ; RV32-NEXT: vor.vv v8, v8, v12 @@ -475,7 +475,7 @@ ; ; RV64-LABEL: bitreverse_nxv16i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; RV64-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; RV64-NEXT: vsrl.vi v12, v8, 8 ; RV64-NEXT: vsll.vi v8, v8, 8 ; RV64-NEXT: vor.vv v8, v8, v12 @@ -509,7 +509,7 @@ define @bitreverse_nxv32i16( %va) { ; RV32-LABEL: bitreverse_nxv32i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; RV32-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; RV32-NEXT: vsrl.vi v16, v8, 8 ; RV32-NEXT: vsll.vi v8, v8, 8 ; RV32-NEXT: vor.vv v8, v8, v16 @@ -538,7 +538,7 @@ ; ; RV64-LABEL: bitreverse_nxv32i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; RV64-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; RV64-NEXT: vsrl.vi v16, v8, 8 ; RV64-NEXT: vsll.vi v8, v8, 8 ; RV64-NEXT: vor.vv v8, v8, v16 @@ -572,7 +572,7 @@ define @bitreverse_nxv1i32( %va) { ; RV32-LABEL: bitreverse_nxv1i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; RV32-NEXT: vsrl.vi v9, v8, 8 ; RV32-NEXT: lui a0, 16 ; RV32-NEXT: addi a0, a0, -256 @@ -610,7 +610,7 @@ ; ; RV64-LABEL: bitreverse_nxv1i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; RV64-NEXT: vsrl.vi v9, v8, 8 ; RV64-NEXT: lui a0, 16 ; RV64-NEXT: addiw a0, a0, -256 @@ -653,7 +653,7 @@ define @bitreverse_nxv2i32( %va) { ; RV32-LABEL: bitreverse_nxv2i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV32-NEXT: vsrl.vi v9, v8, 8 ; RV32-NEXT: lui a0, 16 ; RV32-NEXT: addi a0, a0, -256 @@ -691,7 +691,7 @@ ; ; RV64-LABEL: bitreverse_nxv2i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV64-NEXT: vsrl.vi v9, v8, 8 ; RV64-NEXT: lui a0, 16 ; RV64-NEXT: addiw a0, a0, -256 @@ -734,7 +734,7 @@ define @bitreverse_nxv4i32( %va) { ; RV32-LABEL: bitreverse_nxv4i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; RV32-NEXT: vsrl.vi v10, v8, 8 ; RV32-NEXT: lui a0, 16 ; RV32-NEXT: 
addi a0, a0, -256 @@ -772,7 +772,7 @@ ; ; RV64-LABEL: bitreverse_nxv4i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; RV64-NEXT: vsrl.vi v10, v8, 8 ; RV64-NEXT: lui a0, 16 ; RV64-NEXT: addiw a0, a0, -256 @@ -815,7 +815,7 @@ define @bitreverse_nxv8i32( %va) { ; RV32-LABEL: bitreverse_nxv8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; RV32-NEXT: vsrl.vi v12, v8, 8 ; RV32-NEXT: lui a0, 16 ; RV32-NEXT: addi a0, a0, -256 @@ -853,7 +853,7 @@ ; ; RV64-LABEL: bitreverse_nxv8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; RV64-NEXT: vsrl.vi v12, v8, 8 ; RV64-NEXT: lui a0, 16 ; RV64-NEXT: addiw a0, a0, -256 @@ -896,7 +896,7 @@ define @bitreverse_nxv16i32( %va) { ; RV32-LABEL: bitreverse_nxv16i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; RV32-NEXT: vsrl.vi v16, v8, 8 ; RV32-NEXT: lui a0, 16 ; RV32-NEXT: addi a0, a0, -256 @@ -934,7 +934,7 @@ ; ; RV64-LABEL: bitreverse_nxv16i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; RV64-NEXT: vsrl.vi v16, v8, 8 ; RV64-NEXT: lui a0, 16 ; RV64-NEXT: addiw a0, a0, -256 @@ -1003,7 +1003,7 @@ ; RV32-NEXT: sw a2, 12(sp) ; RV32-NEXT: sw a2, 8(sp) ; RV32-NEXT: li a2, 56 -; RV32-NEXT: vsetvli a3, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a3, zero, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v9, v8, a2 ; RV32-NEXT: li a3, 40 ; RV32-NEXT: vsrl.vx v10, v8, a3 @@ -1055,7 +1055,7 @@ ; RV64-LABEL: bitreverse_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: li a0, 56 -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV64-NEXT: vsrl.vx v9, v8, a0 ; RV64-NEXT: li a1, 40 ; RV64-NEXT: vsrl.vx v10, v8, a1 @@ -1142,7 +1142,7 @@ ; RV32-NEXT: sw a2, 12(sp) ; RV32-NEXT: sw a2, 8(sp) ; RV32-NEXT: li a2, 56 -; RV32-NEXT: vsetvli a3, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a3, zero, e64, m2, ta, ma ; RV32-NEXT: vsrl.vx v10, v8, a2 ; RV32-NEXT: li a3, 40 ; RV32-NEXT: vsrl.vx v12, v8, a3 @@ -1194,7 +1194,7 @@ ; RV64-LABEL: bitreverse_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: li a0, 56 -; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV64-NEXT: vsrl.vx v10, v8, a0 ; RV64-NEXT: li a1, 40 ; RV64-NEXT: vsrl.vx v12, v8, a1 @@ -1281,7 +1281,7 @@ ; RV32-NEXT: sw a2, 12(sp) ; RV32-NEXT: sw a2, 8(sp) ; RV32-NEXT: li a2, 56 -; RV32-NEXT: vsetvli a3, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a3, zero, e64, m4, ta, ma ; RV32-NEXT: vsrl.vx v12, v8, a2 ; RV32-NEXT: li a3, 40 ; RV32-NEXT: vsrl.vx v16, v8, a3 @@ -1333,7 +1333,7 @@ ; RV64-LABEL: bitreverse_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: li a0, 56 -; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV64-NEXT: vsrl.vx v12, v8, a0 ; RV64-NEXT: li a1, 40 ; RV64-NEXT: vsrl.vx v16, v8, a1 @@ -1423,7 +1423,7 @@ ; RV32-NEXT: sw a2, 12(sp) ; RV32-NEXT: sw a2, 8(sp) ; RV32-NEXT: li a2, 56 -; RV32-NEXT: vsetvli a3, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a3, zero, e64, m8, ta, ma ; RV32-NEXT: li a3, 40 ; RV32-NEXT: vsrl.vx v16, v8, a3 ; RV32-NEXT: vand.vx v16, v16, a1 @@ -1502,7 +1502,7 @@ ; RV64-LABEL: bitreverse_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: li a0, 56 -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: 
vsrl.vx v16, v8, a0 ; RV64-NEXT: li a1, 40 ; RV64-NEXT: vsrl.vx v24, v8, a1 diff --git a/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll @@ -5,7 +5,7 @@ define @bswap_nxv1i16( %va) { ; CHECK-LABEL: bswap_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vsrl.vi v9, v8, 8 ; CHECK-NEXT: vsll.vi v8, v8, 8 ; CHECK-NEXT: vor.vv v8, v8, v9 @@ -18,7 +18,7 @@ define @bswap_nxv2i16( %va) { ; CHECK-LABEL: bswap_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vsrl.vi v9, v8, 8 ; CHECK-NEXT: vsll.vi v8, v8, 8 ; CHECK-NEXT: vor.vv v8, v8, v9 @@ -31,7 +31,7 @@ define @bswap_nxv4i16( %va) { ; CHECK-LABEL: bswap_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vsrl.vi v9, v8, 8 ; CHECK-NEXT: vsll.vi v8, v8, 8 ; CHECK-NEXT: vor.vv v8, v8, v9 @@ -44,7 +44,7 @@ define @bswap_nxv8i16( %va) { ; CHECK-LABEL: bswap_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vsrl.vi v10, v8, 8 ; CHECK-NEXT: vsll.vi v8, v8, 8 ; CHECK-NEXT: vor.vv v8, v8, v10 @@ -57,7 +57,7 @@ define @bswap_nxv16i16( %va) { ; CHECK-LABEL: bswap_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vsrl.vi v12, v8, 8 ; CHECK-NEXT: vsll.vi v8, v8, 8 ; CHECK-NEXT: vor.vv v8, v8, v12 @@ -70,7 +70,7 @@ define @bswap_nxv32i16( %va) { ; CHECK-LABEL: bswap_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vsrl.vi v16, v8, 8 ; CHECK-NEXT: vsll.vi v8, v8, 8 ; CHECK-NEXT: vor.vv v8, v8, v16 @@ -83,7 +83,7 @@ define @bswap_nxv1i32( %va) { ; RV32-LABEL: bswap_nxv1i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; RV32-NEXT: vsrl.vi v9, v8, 8 ; RV32-NEXT: lui a0, 16 ; RV32-NEXT: addi a0, a0, -256 @@ -100,7 +100,7 @@ ; ; RV64-LABEL: bswap_nxv1i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; RV64-NEXT: vsrl.vi v9, v8, 8 ; RV64-NEXT: lui a0, 16 ; RV64-NEXT: addiw a0, a0, -256 @@ -122,7 +122,7 @@ define @bswap_nxv2i32( %va) { ; RV32-LABEL: bswap_nxv2i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV32-NEXT: vsrl.vi v9, v8, 8 ; RV32-NEXT: lui a0, 16 ; RV32-NEXT: addi a0, a0, -256 @@ -139,7 +139,7 @@ ; ; RV64-LABEL: bswap_nxv2i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV64-NEXT: vsrl.vi v9, v8, 8 ; RV64-NEXT: lui a0, 16 ; RV64-NEXT: addiw a0, a0, -256 @@ -161,7 +161,7 @@ define @bswap_nxv4i32( %va) { ; RV32-LABEL: bswap_nxv4i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; RV32-NEXT: vsrl.vi v10, v8, 8 ; RV32-NEXT: lui a0, 16 ; RV32-NEXT: addi a0, a0, -256 @@ -178,7 +178,7 @@ ; ; RV64-LABEL: bswap_nxv4i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; RV64-NEXT: vsrl.vi 
v10, v8, 8 ; RV64-NEXT: lui a0, 16 ; RV64-NEXT: addiw a0, a0, -256 @@ -200,7 +200,7 @@ define @bswap_nxv8i32( %va) { ; RV32-LABEL: bswap_nxv8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; RV32-NEXT: vsrl.vi v12, v8, 8 ; RV32-NEXT: lui a0, 16 ; RV32-NEXT: addi a0, a0, -256 @@ -217,7 +217,7 @@ ; ; RV64-LABEL: bswap_nxv8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; RV64-NEXT: vsrl.vi v12, v8, 8 ; RV64-NEXT: lui a0, 16 ; RV64-NEXT: addiw a0, a0, -256 @@ -239,7 +239,7 @@ define @bswap_nxv16i32( %va) { ; RV32-LABEL: bswap_nxv16i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; RV32-NEXT: vsrl.vi v16, v8, 8 ; RV32-NEXT: lui a0, 16 ; RV32-NEXT: addi a0, a0, -256 @@ -256,7 +256,7 @@ ; ; RV64-LABEL: bswap_nxv16i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; RV64-NEXT: vsrl.vi v16, v8, 8 ; RV64-NEXT: lui a0, 16 ; RV64-NEXT: addiw a0, a0, -256 @@ -292,7 +292,7 @@ ; RV32-NEXT: addi a1, a1, -256 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: li a2, 56 -; RV32-NEXT: vsetvli a3, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a3, zero, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v9, v8, a2 ; RV32-NEXT: li a3, 40 ; RV32-NEXT: vsrl.vx v10, v8, a3 @@ -326,7 +326,7 @@ ; RV64-LABEL: bswap_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: li a0, 56 -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV64-NEXT: vsrl.vx v9, v8, a0 ; RV64-NEXT: li a1, 40 ; RV64-NEXT: vsrl.vx v10, v8, a1 @@ -380,7 +380,7 @@ ; RV32-NEXT: addi a1, a1, -256 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: li a2, 56 -; RV32-NEXT: vsetvli a3, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a3, zero, e64, m2, ta, ma ; RV32-NEXT: vsrl.vx v10, v8, a2 ; RV32-NEXT: li a3, 40 ; RV32-NEXT: vsrl.vx v12, v8, a3 @@ -414,7 +414,7 @@ ; RV64-LABEL: bswap_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: li a0, 56 -; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV64-NEXT: vsrl.vx v10, v8, a0 ; RV64-NEXT: li a1, 40 ; RV64-NEXT: vsrl.vx v12, v8, a1 @@ -468,7 +468,7 @@ ; RV32-NEXT: addi a1, a1, -256 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: li a2, 56 -; RV32-NEXT: vsetvli a3, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a3, zero, e64, m4, ta, ma ; RV32-NEXT: vsrl.vx v12, v8, a2 ; RV32-NEXT: li a3, 40 ; RV32-NEXT: vsrl.vx v16, v8, a3 @@ -502,7 +502,7 @@ ; RV64-LABEL: bswap_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: li a0, 56 -; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV64-NEXT: vsrl.vx v12, v8, a0 ; RV64-NEXT: li a1, 40 ; RV64-NEXT: vsrl.vx v16, v8, a1 @@ -559,7 +559,7 @@ ; RV32-NEXT: addi a1, a1, -256 ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: li a2, 56 -; RV32-NEXT: vsetvli a3, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a3, zero, e64, m8, ta, ma ; RV32-NEXT: li a3, 40 ; RV32-NEXT: vsrl.vx v16, v8, a3 ; RV32-NEXT: vand.vx v16, v16, a1 @@ -620,7 +620,7 @@ ; RV64-LABEL: bswap_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: li a0, 56 -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsrl.vx v16, v8, a0 ; RV64-NEXT: li a1, 40 ; RV64-NEXT: vsrl.vx v24, v8, a1 diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll --- a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll @@ -7,7 +7,7 @@ define fastcc @ret_nxv4i8(* %p) { ; CHECK-LABEL: ret_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: ret %v = load , * %p @@ -48,7 +48,7 @@ define fastcc @ret_mask_nxv8i1(* %p) { ; CHECK-LABEL: ret_mask_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: ret %v = load , * %p @@ -58,7 +58,7 @@ define fastcc @ret_mask_nxv32i1(* %p) { ; CHECK-LABEL: ret_mask_nxv32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: ret %v = load , * %p @@ -192,7 +192,7 @@ define fastcc @ret_nxv4i8_param_nxv4i8_nxv4i8( %v, %w) { ; CHECK-LABEL: ret_nxv4i8_param_nxv4i8_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: ret %r = add %v, %w @@ -202,7 +202,7 @@ define fastcc @ret_nxv4i64_param_nxv4i64_nxv4i64( %v, %w) { ; CHECK-LABEL: ret_nxv4i64_param_nxv4i64_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v12 ; CHECK-NEXT: ret %r = add %v, %w @@ -212,7 +212,7 @@ define fastcc @ret_nxv8i1_param_nxv8i1_nxv8i1( %v, %w) { ; CHECK-LABEL: ret_nxv8i1_param_nxv8i1_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %r = xor %v, %w @@ -222,7 +222,7 @@ define fastcc @ret_nxv32i1_param_nxv32i1_nxv32i1( %v, %w) { ; CHECK-LABEL: ret_nxv32i1_param_nxv32i1_nxv32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vmand.mm v0, v0, v8 ; CHECK-NEXT: ret %r = and %v, %w @@ -253,7 +253,7 @@ ; CHECK-NEXT: vl8re32.v v0, (a0) ; CHECK-NEXT: vl8re32.v v8, (a1) ; CHECK-NEXT: vl8re32.v v16, (a2) -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vadd.vv v0, v24, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 @@ -494,7 +494,7 @@ ; CHECK-NEXT: add a0, t4, a0 ; CHECK-NEXT: vl8re32.v v24, (t4) ; CHECK-NEXT: vl8re32.v v0, (a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v24 ; CHECK-NEXT: vadd.vv v16, v16, v0 ; CHECK-NEXT: ret @@ -518,7 +518,7 @@ ; RV32-NEXT: andi sp, sp, -128 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 3 -; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; RV32-NEXT: vmv.v.i v8, 0 ; RV32-NEXT: addi a1, sp, 128 ; RV32-NEXT: vs8r.v v8, (a1) @@ -566,7 +566,7 @@ ; RV64-NEXT: andi sp, sp, -128 ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 3 -; RV64-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; RV64-NEXT: vmv.v.i v8, 0 ; RV64-NEXT: addi a1, sp, 128 ; RV64-NEXT: vs8r.v v8, (a1) diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll --- a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll @@ -11,7 +11,7 @@ ; CHECK-NEXT: add a1, a0, a1 ; CHECK-NEXT: vl8re32.v v24, 
(a0) ; CHECK-NEXT: vl8re32.v v0, (a1) -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v24 ; CHECK-NEXT: vadd.vv v16, v16, v0 ; CHECK-NEXT: ret @@ -39,7 +39,7 @@ ; RV32-NEXT: slli a1, a1, 3 ; RV32-NEXT: add a0, a0, a1 ; RV32-NEXT: vs8r.v v16, (a0) -; RV32-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; RV32-NEXT: vmv.v.i v8, 0 ; RV32-NEXT: addi a0, sp, 128 ; RV32-NEXT: vmv.v.i v16, 0 @@ -67,7 +67,7 @@ ; RV64-NEXT: slli a1, a1, 3 ; RV64-NEXT: add a0, a0, a1 ; RV64-NEXT: vs8r.v v16, (a0) -; RV64-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; RV64-NEXT: vmv.v.i v8, 0 ; RV64-NEXT: addi a0, sp, 128 ; RV64-NEXT: vmv.v.i v16, 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll @@ -674,7 +674,7 @@ ; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a4, a1, 3 -; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma ; CHECK-NEXT: sub a3, a0, a1 ; CHECK-NEXT: vslidedown.vx v2, v0, a4 ; CHECK-NEXT: bltu a0, a3, .LBB32_2 diff --git a/llvm/test/CodeGen/RISCV/rvv/cmp-folds.ll b/llvm/test/CodeGen/RISCV/rvv/cmp-folds.ll --- a/llvm/test/CodeGen/RISCV/rvv/cmp-folds.ll +++ b/llvm/test/CodeGen/RISCV/rvv/cmp-folds.ll @@ -5,7 +5,7 @@ define @not_icmp_sle_nxv8i16( %a, %b) { ; CHECK-LABEL: not_icmp_sle_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmslt.vv v0, v10, v8 ; CHECK-NEXT: ret %icmp = icmp sle %a, %b @@ -18,7 +18,7 @@ define @not_icmp_sgt_nxv4i32( %a, %b) { ; CHECK-LABEL: not_icmp_sgt_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v10 ; CHECK-NEXT: ret %icmp = icmp sgt %a, %b @@ -31,7 +31,7 @@ define @not_fcmp_une_nxv2f64( %a, %b) { ; CHECK-LABEL: not_fcmp_une_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vmfeq.vv v0, v8, v10 ; CHECK-NEXT: ret %icmp = fcmp une %a, %b @@ -44,7 +44,7 @@ define @not_fcmp_uge_nxv4f32( %a, %b) { ; CHECK-LABEL: not_fcmp_uge_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vmflt.vv v0, v8, v10 ; CHECK-NEXT: ret %icmp = fcmp uge %a, %b diff --git a/llvm/test/CodeGen/RISCV/rvv/combine-sats.ll b/llvm/test/CodeGen/RISCV/rvv/combine-sats.ll --- a/llvm/test/CodeGen/RISCV/rvv/combine-sats.ll +++ b/llvm/test/CodeGen/RISCV/rvv/combine-sats.ll @@ -10,7 +10,7 @@ ; CHECK-LABEL: add_umax_v2i64: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 7 -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %v1 = call <2 x i64> @llvm.umax.v2i64(<2 x i64> %a0, <2 x i64> ) @@ -22,7 +22,7 @@ ; CHECK-LABEL: add_umax_nxv2i64: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 7 -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %ins1 = insertelement poison, i64 7, i32 0 @@ -40,7 +40,7 @@ define <2 x i64> @sub_umax_v2i64(<2 x i64> %a0, <2 x i64> %a1) { ; CHECK-LABEL: sub_umax_v2i64: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret %v1 = call <2 x i64> @llvm.umax.v2i64(<2 x i64> %a0, <2 x i64> %a1) @@ -51,7 +51,7 @@ define @sub_umax_nxv2i64( %a0, %a1) { ; CHECK-LABEL: sub_umax_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v10 ; CHECK-NEXT: ret %v1 = call @llvm.umax.nxv2i64( %a0, %a1) @@ -62,7 +62,7 @@ define <2 x i64> @sub_umin_v2i64(<2 x i64> %a0, <2 x i64> %a1) { ; CHECK-LABEL: sub_umin_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret %v1 = call <2 x i64> @llvm.umin.v2i64(<2 x i64> %a0, <2 x i64> %a1) @@ -73,7 +73,7 @@ define @sub_umin_nxv2i64( %a0, %a1) { ; CHECK-LABEL: sub_umin_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v10 ; CHECK-NEXT: ret %v1 = call @llvm.umin.nxv2i64( %a0, %a1) @@ -88,7 +88,7 @@ define <2 x i64> @vselect_sub_v2i64(<2 x i64> %a0, <2 x i64> %a1) { ; CHECK-LABEL: vselect_sub_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp uge <2 x i64> %a0, %a1 @@ -100,7 +100,7 @@ define @vselect_sub_nxv2i64( %a0, %a1) { ; CHECK-LABEL: vselect_sub_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v10 ; CHECK-NEXT: ret %cmp = icmp uge %a0, %a1 @@ -112,7 +112,7 @@ define <8 x i16> @vselect_sub_2_v8i16(<8 x i16> %x, i16 zeroext %w) nounwind { ; CHECK-LABEL: vselect_sub_2_v8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -127,7 +127,7 @@ define @vselect_sub_2_nxv8i16( %x, i16 zeroext %w) nounwind { ; CHECK-LABEL: vselect_sub_2_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -146,7 +146,7 @@ ; CHECK-LABEL: vselect_add_const_v2i64: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 6 -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %v1 = add <2 x i64> %a0, @@ -159,7 +159,7 @@ ; CHECK-LABEL: vselect_add_const_nxv2i64: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 6 -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %cm1 = insertelement poison, i64 -6, i32 0 @@ -177,7 +177,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 8 ; RV32-NEXT: addi a0, a0, -1 -; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; RV32-NEXT: vssubu.vx v8, v8, a0 ; RV32-NEXT: ret ; @@ -185,7 +185,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 8 ; RV64-NEXT: addiw a0, a0, -1 -; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; RV64-NEXT: vssubu.vx v8, v8, a0 ; RV64-NEXT: ret %cmp = icmp ugt <2 x i16> %a0, @@ -199,7 +199,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 8 ; 
RV32-NEXT: addi a0, a0, -1 -; RV32-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; RV32-NEXT: vssubu.vx v8, v8, a0 ; RV32-NEXT: ret ; @@ -207,7 +207,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 8 ; RV64-NEXT: addiw a0, a0, -1 -; RV64-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; RV64-NEXT: vssubu.vx v8, v8, a0 ; RV64-NEXT: ret %cm1 = insertelement poison, i16 32766, i32 0 @@ -226,7 +226,7 @@ ; CHECK-LABEL: vselect_xor_const_signbit_v2i16: ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, 8 -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %cmp = icmp slt <2 x i16> %a0, zeroinitializer @@ -239,7 +239,7 @@ ; CHECK-LABEL: vselect_xor_const_signbit_nxv2i16: ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, 8 -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %cmp = icmp slt %a0, zeroinitializer @@ -258,7 +258,7 @@ define <2 x i64> @vselect_add_v2i64(<2 x i64> %a0, <2 x i64> %a1) { ; CHECK-LABEL: vselect_add_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret %v1 = add <2 x i64> %a0, %a1 @@ -270,7 +270,7 @@ define @vselect_add_nxv2i64( %a0, %a1) { ; CHECK-LABEL: vselect_add_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v10 ; CHECK-NEXT: ret %v1 = add %a0, %a1 @@ -287,7 +287,7 @@ define <2 x i64> @vselect_add_const_2_v2i64(<2 x i64> %a0) { ; CHECK-LABEL: vselect_add_const_2_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 6 ; CHECK-NEXT: ret %v1 = add <2 x i64> %a0, @@ -299,7 +299,7 @@ define @vselect_add_const_2_nxv2i64( %a0) { ; CHECK-LABEL: vselect_add_const_2_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 6 ; CHECK-NEXT: ret %cm1 = insertelement poison, i64 6, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/combine-splats.ll b/llvm/test/CodeGen/RISCV/rvv/combine-splats.ll --- a/llvm/test/CodeGen/RISCV/rvv/combine-splats.ll +++ b/llvm/test/CodeGen/RISCV/rvv/combine-splats.ll @@ -7,7 +7,7 @@ define @and_or_nxv4i32( %A) { ; CHECK-LABEL: and_or_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 8 ; CHECK-NEXT: ret %ins1 = insertelement poison, i32 255, i32 0 @@ -24,7 +24,7 @@ define @or_and_nxv2i64( %a0) { ; CHECK-LABEL: or_and_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 3 ; CHECK-NEXT: vand.vi v8, v8, 7 ; CHECK-NEXT: ret @@ -42,7 +42,7 @@ define @or_and_nxv2i64_fold( %a0) { ; CHECK-LABEL: or_and_nxv2i64_fold: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 3 ; CHECK-NEXT: ret %ins1 = insertelement poison, i64 1, i32 0 @@ -60,7 +60,7 @@ ; CHECK-LABEL: combine_vec_shl_shl: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, 
zero, e32, m2, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: li a0, 4 ; CHECK-NEXT: vmv.s.x v12, a0 @@ -81,7 +81,7 @@ define @combine_vec_ashr_ashr( %x) { ; CHECK-LABEL: combine_vec_ashr_ashr: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 6 ; CHECK-NEXT: ret %ins1 = insertelement poison, i32 2, i32 0 @@ -98,7 +98,7 @@ define @combine_vec_lshr_lshr( %x) { ; CHECK-LABEL: combine_vec_lshr_lshr: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 8 ; CHECK-NEXT: ret %ins1 = insertelement poison, i16 2, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/combine-store-fp.ll b/llvm/test/CodeGen/RISCV/rvv/combine-store-fp.ll --- a/llvm/test/CodeGen/RISCV/rvv/combine-store-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/combine-store-fp.ll @@ -6,7 +6,7 @@ ; CHECK-LABEL: combine_fp_zero_stores_crash: ; CHECK: # %bb.0: ; CHECK-NEXT: addi a0, a0, 4 -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/common-shuffle-patterns.ll b/llvm/test/CodeGen/RISCV/rvv/common-shuffle-patterns.ll --- a/llvm/test/CodeGen/RISCV/rvv/common-shuffle-patterns.ll +++ b/llvm/test/CodeGen/RISCV/rvv/common-shuffle-patterns.ll @@ -7,7 +7,7 @@ define dso_local <16 x i16> @interleave(<8 x i16> %v0, <8 x i16> %v1) { ; CHECK-LABEL: interleave: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 16, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m1, ta, ma ; CHECK-NEXT: vwaddu.vv v10, v8, v9 ; CHECK-NEXT: li a0, -1 ; CHECK-NEXT: vwmaccu.vx v10, a0, v9 diff --git a/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll b/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll --- a/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll +++ b/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll @@ -22,23 +22,23 @@ ; RV32-NEXT: vmv1r.v v10, v0 ; RV32-NEXT: andi a0, a0, 1 ; RV32-NEXT: seqz a0, a0 -; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; RV32-NEXT: vmv.v.x v11, a0 ; RV32-NEXT: vmsne.vi v0, v11, 0 -; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV32-NEXT: vmerge.vvm v8, v9, v8, v0 -; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; RV32-NEXT: vmv.v.i v9, 0 -; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; RV32-NEXT: vmv1r.v v0, v10 ; RV32-NEXT: vmerge.vim v8, v9, 1, v0 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: andi a1, a1, 1 ; RV32-NEXT: vmv.v.x v8, a1 ; RV32-NEXT: vmsne.vi v0, v8, 0 -; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV32-NEXT: vmv.v.i v8, 10 ; RV32-NEXT: vse32.v v8, (a0), v0.t ; RV32-NEXT: ret @@ -49,23 +49,23 @@ ; RV64-NEXT: vmv1r.v v12, v0 ; RV64-NEXT: andi a0, a0, 1 ; RV64-NEXT: seqz a0, a0 -; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; RV64-NEXT: vmv.v.x v13, a0 ; RV64-NEXT: vmsne.vi v0, v13, 0 -; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; RV64-NEXT: vmerge.vvm v8, v10, v8, 
v0 -; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; RV64-NEXT: vmv.v.i v10, 0 -; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; RV64-NEXT: vmv.x.s a0, v8 -; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; RV64-NEXT: vmv1r.v v0, v12 ; RV64-NEXT: vmerge.vim v8, v10, 1, v0 ; RV64-NEXT: vmv.x.s a1, v8 ; RV64-NEXT: andi a1, a1, 1 ; RV64-NEXT: vmv.v.x v8, a1 ; RV64-NEXT: vmsne.vi v0, v8, 0 -; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64-NEXT: vmv.v.i v8, 10 ; RV64-NEXT: vse32.v v8, (a0), v0.t ; RV64-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll b/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll --- a/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll +++ b/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll @@ -16,22 +16,22 @@ define <2 x i16> @fixedlen(<2 x i32> %x) { ; RV32-LABEL: fixedlen: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32-NEXT: vsrl.vi v8, v8, 16 ; RV32-NEXT: lui a0, 1048568 ; RV32-NEXT: vand.vx v8, v8, a0 -; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: ret ; ; RV64-LABEL: fixedlen: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV64-NEXT: vsrl.vi v8, v8, 16 ; RV64-NEXT: lui a0, 131071 ; RV64-NEXT: slli a0, a0, 3 ; RV64-NEXT: vand.vx v8, v8, a0 -; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: ret %v41 = insertelement <2 x i32> poison, i32 16, i32 0 @@ -48,7 +48,7 @@ define @scalable( %x) { ; CHECK-LABEL: scalable: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 16 ; CHECK-NEXT: lui a0, 1048568 ; CHECK-NEXT: vand.vx v8, v8, a0 diff --git a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll @@ -7,7 +7,7 @@ define @ctlz_nxv1i8( %va) { ; CHECK-ZVE64X-LABEL: ctlz_nxv1i8: ; CHECK-ZVE64X: # %bb.0: -; CHECK-ZVE64X-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-ZVE64X-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-ZVE64X-NEXT: vsrl.vi v9, v8, 1 ; CHECK-ZVE64X-NEXT: vor.vv v8, v8, v9 ; CHECK-ZVE64X-NEXT: vsrl.vi v9, v8, 2 @@ -31,12 +31,12 @@ ; ; CHECK-D-LABEL: ctlz_nxv1i8: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-D-NEXT: vzext.vf4 v9, v8 ; CHECK-D-NEXT: vfcvt.f.xu.v v9, v9 -; CHECK-D-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-D-NEXT: vnsrl.wi v9, v9, 23 -; CHECK-D-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; CHECK-D-NEXT: vnsrl.wi v9, v9, 0 ; CHECK-D-NEXT: li a0, 134 ; CHECK-D-NEXT: vmseq.vi v0, v8, 0 @@ -51,7 +51,7 @@ define @ctlz_nxv2i8( %va) { ; CHECK-ZVE64X-LABEL: ctlz_nxv2i8: ; CHECK-ZVE64X: # %bb.0: -; CHECK-ZVE64X-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-ZVE64X-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-ZVE64X-NEXT: vsrl.vi v9, v8, 1 ; CHECK-ZVE64X-NEXT: vor.vv v8, v8, v9 ; 
CHECK-ZVE64X-NEXT: vsrl.vi v9, v8, 2 @@ -75,12 +75,12 @@ ; ; CHECK-D-LABEL: ctlz_nxv2i8: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-D-NEXT: vzext.vf4 v9, v8 ; CHECK-D-NEXT: vfcvt.f.xu.v v9, v9 -; CHECK-D-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-D-NEXT: vnsrl.wi v9, v9, 23 -; CHECK-D-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-D-NEXT: vnsrl.wi v9, v9, 0 ; CHECK-D-NEXT: li a0, 134 ; CHECK-D-NEXT: vmseq.vi v0, v8, 0 @@ -95,7 +95,7 @@ define @ctlz_nxv4i8( %va) { ; CHECK-ZVE64X-LABEL: ctlz_nxv4i8: ; CHECK-ZVE64X: # %bb.0: -; CHECK-ZVE64X-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-ZVE64X-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-ZVE64X-NEXT: vsrl.vi v9, v8, 1 ; CHECK-ZVE64X-NEXT: vor.vv v8, v8, v9 ; CHECK-ZVE64X-NEXT: vsrl.vi v9, v8, 2 @@ -119,12 +119,12 @@ ; ; CHECK-D-LABEL: ctlz_nxv4i8: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-D-NEXT: vzext.vf4 v10, v8 ; CHECK-D-NEXT: vfcvt.f.xu.v v10, v10 -; CHECK-D-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-D-NEXT: vnsrl.wi v9, v10, 23 -; CHECK-D-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; CHECK-D-NEXT: vnsrl.wi v9, v9, 0 ; CHECK-D-NEXT: li a0, 134 ; CHECK-D-NEXT: vmseq.vi v0, v8, 0 @@ -139,7 +139,7 @@ define @ctlz_nxv8i8( %va) { ; CHECK-ZVE64X-LABEL: ctlz_nxv8i8: ; CHECK-ZVE64X: # %bb.0: -; CHECK-ZVE64X-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-ZVE64X-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-ZVE64X-NEXT: vsrl.vi v9, v8, 1 ; CHECK-ZVE64X-NEXT: vor.vv v8, v8, v9 ; CHECK-ZVE64X-NEXT: vsrl.vi v9, v8, 2 @@ -163,12 +163,12 @@ ; ; CHECK-D-LABEL: ctlz_nxv8i8: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-D-NEXT: vzext.vf4 v12, v8 ; CHECK-D-NEXT: vfcvt.f.xu.v v12, v12 -; CHECK-D-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-D-NEXT: vnsrl.wi v10, v12, 23 -; CHECK-D-NEXT: vsetvli zero, zero, e8, m1, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e8, m1, ta, ma ; CHECK-D-NEXT: vnsrl.wi v9, v10, 0 ; CHECK-D-NEXT: li a0, 134 ; CHECK-D-NEXT: vmseq.vi v0, v8, 0 @@ -183,7 +183,7 @@ define @ctlz_nxv16i8( %va) { ; CHECK-ZVE64X-LABEL: ctlz_nxv16i8: ; CHECK-ZVE64X: # %bb.0: -; CHECK-ZVE64X-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-ZVE64X-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-ZVE64X-NEXT: vsrl.vi v10, v8, 1 ; CHECK-ZVE64X-NEXT: vor.vv v8, v8, v10 ; CHECK-ZVE64X-NEXT: vsrl.vi v10, v8, 2 @@ -207,12 +207,12 @@ ; ; CHECK-D-LABEL: ctlz_nxv16i8: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-D-NEXT: vzext.vf4 v16, v8 ; CHECK-D-NEXT: vfcvt.f.xu.v v16, v16 -; CHECK-D-NEXT: vsetvli zero, zero, e16, m4, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e16, m4, ta, ma ; CHECK-D-NEXT: vnsrl.wi v12, v16, 23 -; CHECK-D-NEXT: vsetvli zero, zero, e8, m2, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e8, m2, ta, ma ; CHECK-D-NEXT: vnsrl.wi v10, v12, 0 ; CHECK-D-NEXT: li a0, 134 ; CHECK-D-NEXT: vmseq.vi v0, v8, 0 @@ -227,7 +227,7 @@ define @ctlz_nxv32i8( %va) { ; CHECK-LABEL: ctlz_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, 
m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vsrl.vi v12, v8, 1 ; CHECK-NEXT: vor.vv v8, v8, v12 ; CHECK-NEXT: vsrl.vi v12, v8, 2 @@ -256,7 +256,7 @@ define @ctlz_nxv64i8( %va) { ; CHECK-LABEL: ctlz_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vsrl.vi v16, v8, 1 ; CHECK-NEXT: vor.vv v8, v8, v16 ; CHECK-NEXT: vsrl.vi v16, v8, 2 @@ -285,7 +285,7 @@ define @ctlz_nxv1i16( %va) { ; RV32I-LABEL: ctlz_nxv1i16: ; RV32I: # %bb.0: -; RV32I-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; RV32I-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; RV32I-NEXT: vsrl.vi v9, v8, 1 ; RV32I-NEXT: vor.vv v8, v8, v9 ; RV32I-NEXT: vsrl.vi v9, v8, 2 @@ -318,7 +318,7 @@ ; ; RV64I-LABEL: ctlz_nxv1i16: ; RV64I: # %bb.0: -; RV64I-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; RV64I-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; RV64I-NEXT: vsrl.vi v9, v8, 1 ; RV64I-NEXT: vor.vv v8, v8, v9 ; RV64I-NEXT: vsrl.vi v9, v8, 2 @@ -351,7 +351,7 @@ ; ; CHECK-D-LABEL: ctlz_nxv1i16: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-D-NEXT: vfwcvt.f.xu.v v9, v8 ; CHECK-D-NEXT: vnsrl.wi v9, v9, 23 ; CHECK-D-NEXT: li a0, 142 @@ -368,7 +368,7 @@ define @ctlz_nxv2i16( %va) { ; RV32I-LABEL: ctlz_nxv2i16: ; RV32I: # %bb.0: -; RV32I-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; RV32I-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; RV32I-NEXT: vsrl.vi v9, v8, 1 ; RV32I-NEXT: vor.vv v8, v8, v9 ; RV32I-NEXT: vsrl.vi v9, v8, 2 @@ -401,7 +401,7 @@ ; ; RV64I-LABEL: ctlz_nxv2i16: ; RV64I: # %bb.0: -; RV64I-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; RV64I-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; RV64I-NEXT: vsrl.vi v9, v8, 1 ; RV64I-NEXT: vor.vv v8, v8, v9 ; RV64I-NEXT: vsrl.vi v9, v8, 2 @@ -434,7 +434,7 @@ ; ; CHECK-D-LABEL: ctlz_nxv2i16: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-D-NEXT: vfwcvt.f.xu.v v9, v8 ; CHECK-D-NEXT: vnsrl.wi v9, v9, 23 ; CHECK-D-NEXT: li a0, 142 @@ -451,7 +451,7 @@ define @ctlz_nxv4i16( %va) { ; RV32I-LABEL: ctlz_nxv4i16: ; RV32I: # %bb.0: -; RV32I-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; RV32I-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; RV32I-NEXT: vsrl.vi v9, v8, 1 ; RV32I-NEXT: vor.vv v8, v8, v9 ; RV32I-NEXT: vsrl.vi v9, v8, 2 @@ -484,7 +484,7 @@ ; ; RV64I-LABEL: ctlz_nxv4i16: ; RV64I: # %bb.0: -; RV64I-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; RV64I-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; RV64I-NEXT: vsrl.vi v9, v8, 1 ; RV64I-NEXT: vor.vv v8, v8, v9 ; RV64I-NEXT: vsrl.vi v9, v8, 2 @@ -517,7 +517,7 @@ ; ; CHECK-D-LABEL: ctlz_nxv4i16: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-D-NEXT: vfwcvt.f.xu.v v10, v8 ; CHECK-D-NEXT: vnsrl.wi v9, v10, 23 ; CHECK-D-NEXT: li a0, 142 @@ -534,7 +534,7 @@ define @ctlz_nxv8i16( %va) { ; RV32I-LABEL: ctlz_nxv8i16: ; RV32I: # %bb.0: -; RV32I-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; RV32I-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; RV32I-NEXT: vsrl.vi v10, v8, 1 ; RV32I-NEXT: vor.vv v8, v8, v10 ; RV32I-NEXT: vsrl.vi v10, v8, 2 @@ -567,7 +567,7 @@ ; ; RV64I-LABEL: ctlz_nxv8i16: ; RV64I: # %bb.0: -; RV64I-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; RV64I-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; RV64I-NEXT: vsrl.vi v10, v8, 1 ; RV64I-NEXT: vor.vv v8, v8, v10 ; RV64I-NEXT: vsrl.vi v10, v8, 2 @@ -600,7 +600,7 @@ ; ; CHECK-D-LABEL: 
ctlz_nxv8i16: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-D-NEXT: vfwcvt.f.xu.v v12, v8 ; CHECK-D-NEXT: vnsrl.wi v10, v12, 23 ; CHECK-D-NEXT: li a0, 142 @@ -617,7 +617,7 @@ define @ctlz_nxv16i16( %va) { ; RV32I-LABEL: ctlz_nxv16i16: ; RV32I: # %bb.0: -; RV32I-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; RV32I-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; RV32I-NEXT: vsrl.vi v12, v8, 1 ; RV32I-NEXT: vor.vv v8, v8, v12 ; RV32I-NEXT: vsrl.vi v12, v8, 2 @@ -650,7 +650,7 @@ ; ; RV64I-LABEL: ctlz_nxv16i16: ; RV64I: # %bb.0: -; RV64I-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; RV64I-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; RV64I-NEXT: vsrl.vi v12, v8, 1 ; RV64I-NEXT: vor.vv v8, v8, v12 ; RV64I-NEXT: vsrl.vi v12, v8, 2 @@ -683,7 +683,7 @@ ; ; CHECK-D-LABEL: ctlz_nxv16i16: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-D-NEXT: vfwcvt.f.xu.v v16, v8 ; CHECK-D-NEXT: vnsrl.wi v12, v16, 23 ; CHECK-D-NEXT: li a0, 142 @@ -700,7 +700,7 @@ define @ctlz_nxv32i16( %va) { ; RV32-LABEL: ctlz_nxv32i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; RV32-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; RV32-NEXT: vsrl.vi v16, v8, 1 ; RV32-NEXT: vor.vv v8, v8, v16 ; RV32-NEXT: vsrl.vi v16, v8, 2 @@ -733,7 +733,7 @@ ; ; RV64-LABEL: ctlz_nxv32i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; RV64-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; RV64-NEXT: vsrl.vi v16, v8, 1 ; RV64-NEXT: vor.vv v8, v8, v16 ; RV64-NEXT: vsrl.vi v16, v8, 2 @@ -771,7 +771,7 @@ define @ctlz_nxv1i32( %va) { ; RV32I-LABEL: ctlz_nxv1i32: ; RV32I: # %bb.0: -; RV32I-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; RV32I-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; RV32I-NEXT: vsrl.vi v9, v8, 1 ; RV32I-NEXT: vor.vv v8, v8, v9 ; RV32I-NEXT: vsrl.vi v9, v8, 2 @@ -807,7 +807,7 @@ ; ; RV64I-LABEL: ctlz_nxv1i32: ; RV64I: # %bb.0: -; RV64I-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; RV64I-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; RV64I-NEXT: vsrl.vi v9, v8, 1 ; RV64I-NEXT: vor.vv v8, v8, v9 ; RV64I-NEXT: vsrl.vi v9, v8, 2 @@ -843,12 +843,12 @@ ; ; CHECK-D-LABEL: ctlz_nxv1i32: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-D-NEXT: vfwcvt.f.xu.v v9, v8 ; CHECK-D-NEXT: li a0, 52 -; CHECK-D-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; CHECK-D-NEXT: vsrl.vx v9, v9, a0 -; CHECK-D-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-D-NEXT: vnsrl.wi v9, v9, 0 ; CHECK-D-NEXT: li a0, 1054 ; CHECK-D-NEXT: vrsub.vx v9, v9, a0 @@ -864,7 +864,7 @@ define @ctlz_nxv2i32( %va) { ; RV32I-LABEL: ctlz_nxv2i32: ; RV32I: # %bb.0: -; RV32I-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV32I-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV32I-NEXT: vsrl.vi v9, v8, 1 ; RV32I-NEXT: vor.vv v8, v8, v9 ; RV32I-NEXT: vsrl.vi v9, v8, 2 @@ -900,7 +900,7 @@ ; ; RV64I-LABEL: ctlz_nxv2i32: ; RV64I: # %bb.0: -; RV64I-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV64I-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV64I-NEXT: vsrl.vi v9, v8, 1 ; RV64I-NEXT: vor.vv v8, v8, v9 ; RV64I-NEXT: vsrl.vi v9, v8, 2 @@ -936,12 +936,12 @@ ; ; CHECK-D-LABEL: ctlz_nxv2i32: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-D-NEXT: vfwcvt.f.xu.v v10, v8 ; 
CHECK-D-NEXT: li a0, 52 -; CHECK-D-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; CHECK-D-NEXT: vsrl.vx v10, v10, a0 -; CHECK-D-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-D-NEXT: vnsrl.wi v9, v10, 0 ; CHECK-D-NEXT: li a0, 1054 ; CHECK-D-NEXT: vrsub.vx v9, v9, a0 @@ -957,7 +957,7 @@ define @ctlz_nxv4i32( %va) { ; RV32I-LABEL: ctlz_nxv4i32: ; RV32I: # %bb.0: -; RV32I-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; RV32I-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; RV32I-NEXT: vsrl.vi v10, v8, 1 ; RV32I-NEXT: vor.vv v8, v8, v10 ; RV32I-NEXT: vsrl.vi v10, v8, 2 @@ -993,7 +993,7 @@ ; ; RV64I-LABEL: ctlz_nxv4i32: ; RV64I: # %bb.0: -; RV64I-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; RV64I-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; RV64I-NEXT: vsrl.vi v10, v8, 1 ; RV64I-NEXT: vor.vv v8, v8, v10 ; RV64I-NEXT: vsrl.vi v10, v8, 2 @@ -1029,12 +1029,12 @@ ; ; CHECK-D-LABEL: ctlz_nxv4i32: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-D-NEXT: vfwcvt.f.xu.v v12, v8 ; CHECK-D-NEXT: li a0, 52 -; CHECK-D-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; CHECK-D-NEXT: vsrl.vx v12, v12, a0 -; CHECK-D-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; CHECK-D-NEXT: vnsrl.wi v10, v12, 0 ; CHECK-D-NEXT: li a0, 1054 ; CHECK-D-NEXT: vrsub.vx v10, v10, a0 @@ -1050,7 +1050,7 @@ define @ctlz_nxv8i32( %va) { ; RV32I-LABEL: ctlz_nxv8i32: ; RV32I: # %bb.0: -; RV32I-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; RV32I-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; RV32I-NEXT: vsrl.vi v12, v8, 1 ; RV32I-NEXT: vor.vv v8, v8, v12 ; RV32I-NEXT: vsrl.vi v12, v8, 2 @@ -1086,7 +1086,7 @@ ; ; RV64I-LABEL: ctlz_nxv8i32: ; RV64I: # %bb.0: -; RV64I-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; RV64I-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; RV64I-NEXT: vsrl.vi v12, v8, 1 ; RV64I-NEXT: vor.vv v8, v8, v12 ; RV64I-NEXT: vsrl.vi v12, v8, 2 @@ -1122,12 +1122,12 @@ ; ; CHECK-D-LABEL: ctlz_nxv8i32: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-D-NEXT: vfwcvt.f.xu.v v16, v8 ; CHECK-D-NEXT: li a0, 52 -; CHECK-D-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; CHECK-D-NEXT: vsrl.vx v16, v16, a0 -; CHECK-D-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-D-NEXT: vnsrl.wi v12, v16, 0 ; CHECK-D-NEXT: li a0, 1054 ; CHECK-D-NEXT: vrsub.vx v12, v12, a0 @@ -1143,7 +1143,7 @@ define @ctlz_nxv16i32( %va) { ; RV32-LABEL: ctlz_nxv16i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; RV32-NEXT: vsrl.vi v16, v8, 1 ; RV32-NEXT: vor.vv v8, v8, v16 ; RV32-NEXT: vsrl.vi v16, v8, 2 @@ -1179,7 +1179,7 @@ ; ; RV64-LABEL: ctlz_nxv16i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; RV64-NEXT: vsrl.vi v16, v8, 1 ; RV64-NEXT: vor.vv v8, v8, v16 ; RV64-NEXT: vsrl.vi v16, v8, 2 @@ -1238,7 +1238,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV32-NEXT: vsrl.vi v9, v8, 1 ; RV32-NEXT: vor.vv v8, v8, v9 ; RV32-NEXT: vsrl.vi v9, v8, 2 @@ -1276,7 +1276,7 @@ ; 
; RV64-LABEL: ctlz_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV64-NEXT: vsrl.vi v9, v8, 1 ; RV64-NEXT: vor.vv v8, v8, v9 ; RV64-NEXT: vsrl.vi v9, v8, 2 @@ -1339,7 +1339,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV32-NEXT: vsrl.vi v10, v8, 1 ; RV32-NEXT: vor.vv v8, v8, v10 ; RV32-NEXT: vsrl.vi v10, v8, 2 @@ -1377,7 +1377,7 @@ ; ; RV64-LABEL: ctlz_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV64-NEXT: vsrl.vi v10, v8, 1 ; RV64-NEXT: vor.vv v8, v8, v10 ; RV64-NEXT: vsrl.vi v10, v8, 2 @@ -1440,7 +1440,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV32-NEXT: vsrl.vi v12, v8, 1 ; RV32-NEXT: vor.vv v8, v8, v12 ; RV32-NEXT: vsrl.vi v12, v8, 2 @@ -1478,7 +1478,7 @@ ; ; RV64-LABEL: ctlz_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV64-NEXT: vsrl.vi v12, v8, 1 ; RV64-NEXT: vor.vv v8, v8, v12 ; RV64-NEXT: vsrl.vi v12, v8, 2 @@ -1541,7 +1541,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; RV32-NEXT: vsrl.vi v16, v8, 1 ; RV32-NEXT: vor.vv v8, v8, v16 ; RV32-NEXT: vsrl.vi v16, v8, 2 @@ -1579,7 +1579,7 @@ ; ; RV64-LABEL: ctlz_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; RV64-NEXT: vsrl.vi v16, v8, 1 ; RV64-NEXT: vor.vv v8, v8, v16 ; RV64-NEXT: vsrl.vi v16, v8, 2 @@ -1624,7 +1624,7 @@ define @ctlz_zero_undef_nxv1i8( %va) { ; CHECK-ZVE64X-LABEL: ctlz_zero_undef_nxv1i8: ; CHECK-ZVE64X: # %bb.0: -; CHECK-ZVE64X-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-ZVE64X-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-ZVE64X-NEXT: vsrl.vi v9, v8, 1 ; CHECK-ZVE64X-NEXT: vor.vv v8, v8, v9 ; CHECK-ZVE64X-NEXT: vsrl.vi v9, v8, 2 @@ -1648,12 +1648,12 @@ ; ; CHECK-D-LABEL: ctlz_zero_undef_nxv1i8: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-D-NEXT: vzext.vf4 v9, v8 ; CHECK-D-NEXT: vfcvt.f.xu.v v8, v9 -; CHECK-D-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-D-NEXT: vnsrl.wi v8, v8, 23 -; CHECK-D-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; CHECK-D-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-D-NEXT: li a0, 134 ; CHECK-D-NEXT: vrsub.vx v8, v8, a0 @@ -1665,7 +1665,7 @@ define @ctlz_zero_undef_nxv2i8( %va) { ; CHECK-ZVE64X-LABEL: ctlz_zero_undef_nxv2i8: ; CHECK-ZVE64X: # %bb.0: -; CHECK-ZVE64X-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-ZVE64X-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-ZVE64X-NEXT: vsrl.vi v9, v8, 1 ; CHECK-ZVE64X-NEXT: vor.vv v8, v8, v9 ; CHECK-ZVE64X-NEXT: vsrl.vi v9, v8, 2 @@ -1689,12 +1689,12 @@ ; ; CHECK-D-LABEL: ctlz_zero_undef_nxv2i8: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-D-NEXT: vzext.vf4 v9, v8 ; CHECK-D-NEXT: vfcvt.f.xu.v v8, v9 -; CHECK-D-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; 
CHECK-D-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-D-NEXT: vnsrl.wi v8, v8, 23 -; CHECK-D-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-D-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-D-NEXT: li a0, 134 ; CHECK-D-NEXT: vrsub.vx v8, v8, a0 @@ -1706,7 +1706,7 @@ define @ctlz_zero_undef_nxv4i8( %va) { ; CHECK-ZVE64X-LABEL: ctlz_zero_undef_nxv4i8: ; CHECK-ZVE64X: # %bb.0: -; CHECK-ZVE64X-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-ZVE64X-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-ZVE64X-NEXT: vsrl.vi v9, v8, 1 ; CHECK-ZVE64X-NEXT: vor.vv v8, v8, v9 ; CHECK-ZVE64X-NEXT: vsrl.vi v9, v8, 2 @@ -1730,12 +1730,12 @@ ; ; CHECK-D-LABEL: ctlz_zero_undef_nxv4i8: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-D-NEXT: vzext.vf4 v10, v8 ; CHECK-D-NEXT: vfcvt.f.xu.v v8, v10 -; CHECK-D-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-D-NEXT: vnsrl.wi v10, v8, 23 -; CHECK-D-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; CHECK-D-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-D-NEXT: li a0, 134 ; CHECK-D-NEXT: vrsub.vx v8, v8, a0 @@ -1747,7 +1747,7 @@ define @ctlz_zero_undef_nxv8i8( %va) { ; CHECK-ZVE64X-LABEL: ctlz_zero_undef_nxv8i8: ; CHECK-ZVE64X: # %bb.0: -; CHECK-ZVE64X-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-ZVE64X-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-ZVE64X-NEXT: vsrl.vi v9, v8, 1 ; CHECK-ZVE64X-NEXT: vor.vv v8, v8, v9 ; CHECK-ZVE64X-NEXT: vsrl.vi v9, v8, 2 @@ -1771,12 +1771,12 @@ ; ; CHECK-D-LABEL: ctlz_zero_undef_nxv8i8: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-D-NEXT: vzext.vf4 v12, v8 ; CHECK-D-NEXT: vfcvt.f.xu.v v8, v12 -; CHECK-D-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-D-NEXT: vnsrl.wi v12, v8, 23 -; CHECK-D-NEXT: vsetvli zero, zero, e8, m1, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e8, m1, ta, ma ; CHECK-D-NEXT: vnsrl.wi v8, v12, 0 ; CHECK-D-NEXT: li a0, 134 ; CHECK-D-NEXT: vrsub.vx v8, v8, a0 @@ -1788,7 +1788,7 @@ define @ctlz_zero_undef_nxv16i8( %va) { ; CHECK-ZVE64X-LABEL: ctlz_zero_undef_nxv16i8: ; CHECK-ZVE64X: # %bb.0: -; CHECK-ZVE64X-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-ZVE64X-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-ZVE64X-NEXT: vsrl.vi v10, v8, 1 ; CHECK-ZVE64X-NEXT: vor.vv v8, v8, v10 ; CHECK-ZVE64X-NEXT: vsrl.vi v10, v8, 2 @@ -1812,12 +1812,12 @@ ; ; CHECK-D-LABEL: ctlz_zero_undef_nxv16i8: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-D-NEXT: vzext.vf4 v16, v8 ; CHECK-D-NEXT: vfcvt.f.xu.v v8, v16 -; CHECK-D-NEXT: vsetvli zero, zero, e16, m4, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e16, m4, ta, ma ; CHECK-D-NEXT: vnsrl.wi v16, v8, 23 -; CHECK-D-NEXT: vsetvli zero, zero, e8, m2, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e8, m2, ta, ma ; CHECK-D-NEXT: vnsrl.wi v8, v16, 0 ; CHECK-D-NEXT: li a0, 134 ; CHECK-D-NEXT: vrsub.vx v8, v8, a0 @@ -1829,7 +1829,7 @@ define @ctlz_zero_undef_nxv32i8( %va) { ; CHECK-LABEL: ctlz_zero_undef_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vsrl.vi v12, v8, 1 ; CHECK-NEXT: vor.vv v8, v8, v12 ; CHECK-NEXT: vsrl.vi v12, v8, 2 @@ -1857,7 +1857,7 @@ define 
@ctlz_zero_undef_nxv64i8( %va) { ; CHECK-LABEL: ctlz_zero_undef_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vsrl.vi v16, v8, 1 ; CHECK-NEXT: vor.vv v8, v8, v16 ; CHECK-NEXT: vsrl.vi v16, v8, 2 @@ -1885,7 +1885,7 @@ define @ctlz_zero_undef_nxv1i16( %va) { ; RV32I-LABEL: ctlz_zero_undef_nxv1i16: ; RV32I: # %bb.0: -; RV32I-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; RV32I-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; RV32I-NEXT: vsrl.vi v9, v8, 1 ; RV32I-NEXT: vor.vv v8, v8, v9 ; RV32I-NEXT: vsrl.vi v9, v8, 2 @@ -1918,7 +1918,7 @@ ; ; RV64I-LABEL: ctlz_zero_undef_nxv1i16: ; RV64I: # %bb.0: -; RV64I-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; RV64I-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; RV64I-NEXT: vsrl.vi v9, v8, 1 ; RV64I-NEXT: vor.vv v8, v8, v9 ; RV64I-NEXT: vsrl.vi v9, v8, 2 @@ -1951,7 +1951,7 @@ ; ; CHECK-D-LABEL: ctlz_zero_undef_nxv1i16: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-D-NEXT: vfwcvt.f.xu.v v9, v8 ; CHECK-D-NEXT: vnsrl.wi v8, v9, 23 ; CHECK-D-NEXT: li a0, 142 @@ -1964,7 +1964,7 @@ define @ctlz_zero_undef_nxv2i16( %va) { ; RV32I-LABEL: ctlz_zero_undef_nxv2i16: ; RV32I: # %bb.0: -; RV32I-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; RV32I-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; RV32I-NEXT: vsrl.vi v9, v8, 1 ; RV32I-NEXT: vor.vv v8, v8, v9 ; RV32I-NEXT: vsrl.vi v9, v8, 2 @@ -1997,7 +1997,7 @@ ; ; RV64I-LABEL: ctlz_zero_undef_nxv2i16: ; RV64I: # %bb.0: -; RV64I-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; RV64I-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; RV64I-NEXT: vsrl.vi v9, v8, 1 ; RV64I-NEXT: vor.vv v8, v8, v9 ; RV64I-NEXT: vsrl.vi v9, v8, 2 @@ -2030,7 +2030,7 @@ ; ; CHECK-D-LABEL: ctlz_zero_undef_nxv2i16: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-D-NEXT: vfwcvt.f.xu.v v9, v8 ; CHECK-D-NEXT: vnsrl.wi v8, v9, 23 ; CHECK-D-NEXT: li a0, 142 @@ -2043,7 +2043,7 @@ define @ctlz_zero_undef_nxv4i16( %va) { ; RV32I-LABEL: ctlz_zero_undef_nxv4i16: ; RV32I: # %bb.0: -; RV32I-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; RV32I-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; RV32I-NEXT: vsrl.vi v9, v8, 1 ; RV32I-NEXT: vor.vv v8, v8, v9 ; RV32I-NEXT: vsrl.vi v9, v8, 2 @@ -2076,7 +2076,7 @@ ; ; RV64I-LABEL: ctlz_zero_undef_nxv4i16: ; RV64I: # %bb.0: -; RV64I-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; RV64I-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; RV64I-NEXT: vsrl.vi v9, v8, 1 ; RV64I-NEXT: vor.vv v8, v8, v9 ; RV64I-NEXT: vsrl.vi v9, v8, 2 @@ -2109,7 +2109,7 @@ ; ; CHECK-D-LABEL: ctlz_zero_undef_nxv4i16: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-D-NEXT: vfwcvt.f.xu.v v10, v8 ; CHECK-D-NEXT: vnsrl.wi v8, v10, 23 ; CHECK-D-NEXT: li a0, 142 @@ -2122,7 +2122,7 @@ define @ctlz_zero_undef_nxv8i16( %va) { ; RV32I-LABEL: ctlz_zero_undef_nxv8i16: ; RV32I: # %bb.0: -; RV32I-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; RV32I-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; RV32I-NEXT: vsrl.vi v10, v8, 1 ; RV32I-NEXT: vor.vv v8, v8, v10 ; RV32I-NEXT: vsrl.vi v10, v8, 2 @@ -2155,7 +2155,7 @@ ; ; RV64I-LABEL: ctlz_zero_undef_nxv8i16: ; RV64I: # %bb.0: -; RV64I-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; RV64I-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; RV64I-NEXT: vsrl.vi v10, v8, 1 ; RV64I-NEXT: vor.vv v8, v8, v10 ; RV64I-NEXT: vsrl.vi v10, v8, 2 @@ -2188,7 +2188,7 @@ ; 
; CHECK-D-LABEL: ctlz_zero_undef_nxv8i16: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-D-NEXT: vfwcvt.f.xu.v v12, v8 ; CHECK-D-NEXT: vnsrl.wi v8, v12, 23 ; CHECK-D-NEXT: li a0, 142 @@ -2201,7 +2201,7 @@ define @ctlz_zero_undef_nxv16i16( %va) { ; RV32I-LABEL: ctlz_zero_undef_nxv16i16: ; RV32I: # %bb.0: -; RV32I-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; RV32I-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; RV32I-NEXT: vsrl.vi v12, v8, 1 ; RV32I-NEXT: vor.vv v8, v8, v12 ; RV32I-NEXT: vsrl.vi v12, v8, 2 @@ -2234,7 +2234,7 @@ ; ; RV64I-LABEL: ctlz_zero_undef_nxv16i16: ; RV64I: # %bb.0: -; RV64I-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; RV64I-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; RV64I-NEXT: vsrl.vi v12, v8, 1 ; RV64I-NEXT: vor.vv v8, v8, v12 ; RV64I-NEXT: vsrl.vi v12, v8, 2 @@ -2267,7 +2267,7 @@ ; ; CHECK-D-LABEL: ctlz_zero_undef_nxv16i16: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-D-NEXT: vfwcvt.f.xu.v v16, v8 ; CHECK-D-NEXT: vnsrl.wi v8, v16, 23 ; CHECK-D-NEXT: li a0, 142 @@ -2280,7 +2280,7 @@ define @ctlz_zero_undef_nxv32i16( %va) { ; RV32-LABEL: ctlz_zero_undef_nxv32i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; RV32-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; RV32-NEXT: vsrl.vi v16, v8, 1 ; RV32-NEXT: vor.vv v8, v8, v16 ; RV32-NEXT: vsrl.vi v16, v8, 2 @@ -2313,7 +2313,7 @@ ; ; RV64-LABEL: ctlz_zero_undef_nxv32i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; RV64-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; RV64-NEXT: vsrl.vi v16, v8, 1 ; RV64-NEXT: vor.vv v8, v8, v16 ; RV64-NEXT: vsrl.vi v16, v8, 2 @@ -2350,7 +2350,7 @@ define @ctlz_zero_undef_nxv1i32( %va) { ; RV32I-LABEL: ctlz_zero_undef_nxv1i32: ; RV32I: # %bb.0: -; RV32I-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; RV32I-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; RV32I-NEXT: vsrl.vi v9, v8, 1 ; RV32I-NEXT: vor.vv v8, v8, v9 ; RV32I-NEXT: vsrl.vi v9, v8, 2 @@ -2386,7 +2386,7 @@ ; ; RV64I-LABEL: ctlz_zero_undef_nxv1i32: ; RV64I: # %bb.0: -; RV64I-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; RV64I-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; RV64I-NEXT: vsrl.vi v9, v8, 1 ; RV64I-NEXT: vor.vv v8, v8, v9 ; RV64I-NEXT: vsrl.vi v9, v8, 2 @@ -2422,12 +2422,12 @@ ; ; CHECK-D-LABEL: ctlz_zero_undef_nxv1i32: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-D-NEXT: vfwcvt.f.xu.v v9, v8 ; CHECK-D-NEXT: li a0, 52 -; CHECK-D-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; CHECK-D-NEXT: vsrl.vx v8, v9, a0 -; CHECK-D-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-D-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-D-NEXT: li a0, 1054 ; CHECK-D-NEXT: vrsub.vx v8, v8, a0 @@ -2439,7 +2439,7 @@ define @ctlz_zero_undef_nxv2i32( %va) { ; RV32I-LABEL: ctlz_zero_undef_nxv2i32: ; RV32I: # %bb.0: -; RV32I-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV32I-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV32I-NEXT: vsrl.vi v9, v8, 1 ; RV32I-NEXT: vor.vv v8, v8, v9 ; RV32I-NEXT: vsrl.vi v9, v8, 2 @@ -2475,7 +2475,7 @@ ; ; RV64I-LABEL: ctlz_zero_undef_nxv2i32: ; RV64I: # %bb.0: -; RV64I-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV64I-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV64I-NEXT: vsrl.vi v9, v8, 1 ; RV64I-NEXT: vor.vv v8, v8, v9 ; RV64I-NEXT: vsrl.vi v9, v8, 2 @@ -2511,12 +2511,12 @@ ; 
; CHECK-D-LABEL: ctlz_zero_undef_nxv2i32: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-D-NEXT: vfwcvt.f.xu.v v10, v8 ; CHECK-D-NEXT: li a0, 52 -; CHECK-D-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; CHECK-D-NEXT: vsrl.vx v8, v10, a0 -; CHECK-D-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-D-NEXT: vnsrl.wi v10, v8, 0 ; CHECK-D-NEXT: li a0, 1054 ; CHECK-D-NEXT: vrsub.vx v8, v10, a0 @@ -2528,7 +2528,7 @@ define @ctlz_zero_undef_nxv4i32( %va) { ; RV32I-LABEL: ctlz_zero_undef_nxv4i32: ; RV32I: # %bb.0: -; RV32I-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; RV32I-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; RV32I-NEXT: vsrl.vi v10, v8, 1 ; RV32I-NEXT: vor.vv v8, v8, v10 ; RV32I-NEXT: vsrl.vi v10, v8, 2 @@ -2564,7 +2564,7 @@ ; ; RV64I-LABEL: ctlz_zero_undef_nxv4i32: ; RV64I: # %bb.0: -; RV64I-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; RV64I-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; RV64I-NEXT: vsrl.vi v10, v8, 1 ; RV64I-NEXT: vor.vv v8, v8, v10 ; RV64I-NEXT: vsrl.vi v10, v8, 2 @@ -2600,12 +2600,12 @@ ; ; CHECK-D-LABEL: ctlz_zero_undef_nxv4i32: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-D-NEXT: vfwcvt.f.xu.v v12, v8 ; CHECK-D-NEXT: li a0, 52 -; CHECK-D-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; CHECK-D-NEXT: vsrl.vx v8, v12, a0 -; CHECK-D-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; CHECK-D-NEXT: vnsrl.wi v12, v8, 0 ; CHECK-D-NEXT: li a0, 1054 ; CHECK-D-NEXT: vrsub.vx v8, v12, a0 @@ -2617,7 +2617,7 @@ define @ctlz_zero_undef_nxv8i32( %va) { ; RV32I-LABEL: ctlz_zero_undef_nxv8i32: ; RV32I: # %bb.0: -; RV32I-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; RV32I-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; RV32I-NEXT: vsrl.vi v12, v8, 1 ; RV32I-NEXT: vor.vv v8, v8, v12 ; RV32I-NEXT: vsrl.vi v12, v8, 2 @@ -2653,7 +2653,7 @@ ; ; RV64I-LABEL: ctlz_zero_undef_nxv8i32: ; RV64I: # %bb.0: -; RV64I-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; RV64I-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; RV64I-NEXT: vsrl.vi v12, v8, 1 ; RV64I-NEXT: vor.vv v8, v8, v12 ; RV64I-NEXT: vsrl.vi v12, v8, 2 @@ -2689,12 +2689,12 @@ ; ; CHECK-D-LABEL: ctlz_zero_undef_nxv8i32: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-D-NEXT: vfwcvt.f.xu.v v16, v8 ; CHECK-D-NEXT: li a0, 52 -; CHECK-D-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; CHECK-D-NEXT: vsrl.vx v8, v16, a0 -; CHECK-D-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-D-NEXT: vnsrl.wi v16, v8, 0 ; CHECK-D-NEXT: li a0, 1054 ; CHECK-D-NEXT: vrsub.vx v8, v16, a0 @@ -2706,7 +2706,7 @@ define @ctlz_zero_undef_nxv16i32( %va) { ; RV32-LABEL: ctlz_zero_undef_nxv16i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; RV32-NEXT: vsrl.vi v16, v8, 1 ; RV32-NEXT: vor.vv v8, v8, v16 ; RV32-NEXT: vsrl.vi v16, v8, 2 @@ -2742,7 +2742,7 @@ ; ; RV64-LABEL: ctlz_zero_undef_nxv16i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; RV64-NEXT: vsrl.vi v16, v8, 1 ; RV64-NEXT: vor.vv v8, v8, v16 ; RV64-NEXT: vsrl.vi v16, 
v8, 2 @@ -2800,7 +2800,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV32-NEXT: vsrl.vi v9, v8, 1 ; RV32-NEXT: vor.vv v8, v8, v9 ; RV32-NEXT: vsrl.vi v9, v8, 2 @@ -2838,7 +2838,7 @@ ; ; RV64-LABEL: ctlz_zero_undef_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV64-NEXT: vsrl.vi v9, v8, 1 ; RV64-NEXT: vor.vv v8, v8, v9 ; RV64-NEXT: vsrl.vi v9, v8, 2 @@ -2900,7 +2900,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV32-NEXT: vsrl.vi v10, v8, 1 ; RV32-NEXT: vor.vv v8, v8, v10 ; RV32-NEXT: vsrl.vi v10, v8, 2 @@ -2938,7 +2938,7 @@ ; ; RV64-LABEL: ctlz_zero_undef_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV64-NEXT: vsrl.vi v10, v8, 1 ; RV64-NEXT: vor.vv v8, v8, v10 ; RV64-NEXT: vsrl.vi v10, v8, 2 @@ -3000,7 +3000,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV32-NEXT: vsrl.vi v12, v8, 1 ; RV32-NEXT: vor.vv v8, v8, v12 ; RV32-NEXT: vsrl.vi v12, v8, 2 @@ -3038,7 +3038,7 @@ ; ; RV64-LABEL: ctlz_zero_undef_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV64-NEXT: vsrl.vi v12, v8, 1 ; RV64-NEXT: vor.vv v8, v8, v12 ; RV64-NEXT: vsrl.vi v12, v8, 2 @@ -3100,7 +3100,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; RV32-NEXT: vsrl.vi v16, v8, 1 ; RV32-NEXT: vor.vv v8, v8, v16 ; RV32-NEXT: vsrl.vi v16, v8, 2 @@ -3138,7 +3138,7 @@ ; ; RV64-LABEL: ctlz_zero_undef_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; RV64-NEXT: vsrl.vi v16, v8, 1 ; RV64-NEXT: vor.vv v8, v8, v16 ; RV64-NEXT: vsrl.vi v16, v8, 2 diff --git a/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/ctpop-sdnode.ll @@ -5,7 +5,7 @@ define @ctpop_nxv1i8( %va) { ; CHECK-LABEL: ctpop_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vsrl.vi v9, v8, 1 ; CHECK-NEXT: li a0, 85 ; CHECK-NEXT: vand.vx v9, v9, a0 @@ -27,7 +27,7 @@ define @ctpop_nxv2i8( %va) { ; CHECK-LABEL: ctpop_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vsrl.vi v9, v8, 1 ; CHECK-NEXT: li a0, 85 ; CHECK-NEXT: vand.vx v9, v9, a0 @@ -49,7 +49,7 @@ define @ctpop_nxv4i8( %va) { ; CHECK-LABEL: ctpop_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vsrl.vi v9, v8, 1 ; CHECK-NEXT: li a0, 85 ; CHECK-NEXT: vand.vx v9, v9, a0 @@ -71,7 +71,7 @@ define @ctpop_nxv8i8( %va) { ; CHECK-LABEL: ctpop_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vsrl.vi v9, v8, 1 ; 
CHECK-NEXT: li a0, 85 ; CHECK-NEXT: vand.vx v9, v9, a0 @@ -93,7 +93,7 @@ define @ctpop_nxv16i8( %va) { ; CHECK-LABEL: ctpop_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vsrl.vi v10, v8, 1 ; CHECK-NEXT: li a0, 85 ; CHECK-NEXT: vand.vx v10, v10, a0 @@ -115,7 +115,7 @@ define @ctpop_nxv32i8( %va) { ; CHECK-LABEL: ctpop_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vsrl.vi v12, v8, 1 ; CHECK-NEXT: li a0, 85 ; CHECK-NEXT: vand.vx v12, v12, a0 @@ -137,7 +137,7 @@ define @ctpop_nxv64i8( %va) { ; CHECK-LABEL: ctpop_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vsrl.vi v16, v8, 1 ; CHECK-NEXT: li a0, 85 ; CHECK-NEXT: vand.vx v16, v16, a0 @@ -159,7 +159,7 @@ define @ctpop_nxv1i16( %va) { ; RV32-LABEL: ctpop_nxv1i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; RV32-NEXT: vsrl.vi v9, v8, 1 ; RV32-NEXT: lui a0, 5 ; RV32-NEXT: addi a0, a0, 1365 @@ -183,7 +183,7 @@ ; ; RV64-LABEL: ctpop_nxv1i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; RV64-NEXT: vsrl.vi v9, v8, 1 ; RV64-NEXT: lui a0, 5 ; RV64-NEXT: addiw a0, a0, 1365 @@ -212,7 +212,7 @@ define @ctpop_nxv2i16( %va) { ; RV32-LABEL: ctpop_nxv2i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; RV32-NEXT: vsrl.vi v9, v8, 1 ; RV32-NEXT: lui a0, 5 ; RV32-NEXT: addi a0, a0, 1365 @@ -236,7 +236,7 @@ ; ; RV64-LABEL: ctpop_nxv2i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; RV64-NEXT: vsrl.vi v9, v8, 1 ; RV64-NEXT: lui a0, 5 ; RV64-NEXT: addiw a0, a0, 1365 @@ -265,7 +265,7 @@ define @ctpop_nxv4i16( %va) { ; RV32-LABEL: ctpop_nxv4i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; RV32-NEXT: vsrl.vi v9, v8, 1 ; RV32-NEXT: lui a0, 5 ; RV32-NEXT: addi a0, a0, 1365 @@ -289,7 +289,7 @@ ; ; RV64-LABEL: ctpop_nxv4i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; RV64-NEXT: vsrl.vi v9, v8, 1 ; RV64-NEXT: lui a0, 5 ; RV64-NEXT: addiw a0, a0, 1365 @@ -318,7 +318,7 @@ define @ctpop_nxv8i16( %va) { ; RV32-LABEL: ctpop_nxv8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; RV32-NEXT: vsrl.vi v10, v8, 1 ; RV32-NEXT: lui a0, 5 ; RV32-NEXT: addi a0, a0, 1365 @@ -342,7 +342,7 @@ ; ; RV64-LABEL: ctpop_nxv8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; RV64-NEXT: vsrl.vi v10, v8, 1 ; RV64-NEXT: lui a0, 5 ; RV64-NEXT: addiw a0, a0, 1365 @@ -371,7 +371,7 @@ define @ctpop_nxv16i16( %va) { ; RV32-LABEL: ctpop_nxv16i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; RV32-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; RV32-NEXT: vsrl.vi v12, v8, 1 ; RV32-NEXT: lui a0, 5 ; RV32-NEXT: addi a0, a0, 1365 @@ -395,7 +395,7 @@ ; ; RV64-LABEL: ctpop_nxv16i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; RV64-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; RV64-NEXT: vsrl.vi v12, v8, 1 ; RV64-NEXT: lui a0, 5 ; RV64-NEXT: addiw a0, 
a0, 1365 @@ -424,7 +424,7 @@ define @ctpop_nxv32i16( %va) { ; RV32-LABEL: ctpop_nxv32i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; RV32-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; RV32-NEXT: vsrl.vi v16, v8, 1 ; RV32-NEXT: lui a0, 5 ; RV32-NEXT: addi a0, a0, 1365 @@ -448,7 +448,7 @@ ; ; RV64-LABEL: ctpop_nxv32i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; RV64-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; RV64-NEXT: vsrl.vi v16, v8, 1 ; RV64-NEXT: lui a0, 5 ; RV64-NEXT: addiw a0, a0, 1365 @@ -477,7 +477,7 @@ define @ctpop_nxv1i32( %va) { ; RV32-LABEL: ctpop_nxv1i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; RV32-NEXT: vsrl.vi v9, v8, 1 ; RV32-NEXT: lui a0, 349525 ; RV32-NEXT: addi a0, a0, 1365 @@ -502,7 +502,7 @@ ; ; RV64-LABEL: ctpop_nxv1i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; RV64-NEXT: vsrl.vi v9, v8, 1 ; RV64-NEXT: lui a0, 349525 ; RV64-NEXT: addiw a0, a0, 1365 @@ -532,7 +532,7 @@ define @ctpop_nxv2i32( %va) { ; RV32-LABEL: ctpop_nxv2i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV32-NEXT: vsrl.vi v9, v8, 1 ; RV32-NEXT: lui a0, 349525 ; RV32-NEXT: addi a0, a0, 1365 @@ -557,7 +557,7 @@ ; ; RV64-LABEL: ctpop_nxv2i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV64-NEXT: vsrl.vi v9, v8, 1 ; RV64-NEXT: lui a0, 349525 ; RV64-NEXT: addiw a0, a0, 1365 @@ -587,7 +587,7 @@ define @ctpop_nxv4i32( %va) { ; RV32-LABEL: ctpop_nxv4i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; RV32-NEXT: vsrl.vi v10, v8, 1 ; RV32-NEXT: lui a0, 349525 ; RV32-NEXT: addi a0, a0, 1365 @@ -612,7 +612,7 @@ ; ; RV64-LABEL: ctpop_nxv4i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; RV64-NEXT: vsrl.vi v10, v8, 1 ; RV64-NEXT: lui a0, 349525 ; RV64-NEXT: addiw a0, a0, 1365 @@ -642,7 +642,7 @@ define @ctpop_nxv8i32( %va) { ; RV32-LABEL: ctpop_nxv8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; RV32-NEXT: vsrl.vi v12, v8, 1 ; RV32-NEXT: lui a0, 349525 ; RV32-NEXT: addi a0, a0, 1365 @@ -667,7 +667,7 @@ ; ; RV64-LABEL: ctpop_nxv8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; RV64-NEXT: vsrl.vi v12, v8, 1 ; RV64-NEXT: lui a0, 349525 ; RV64-NEXT: addiw a0, a0, 1365 @@ -697,7 +697,7 @@ define @ctpop_nxv16i32( %va) { ; RV32-LABEL: ctpop_nxv16i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; RV32-NEXT: vsrl.vi v16, v8, 1 ; RV32-NEXT: lui a0, 349525 ; RV32-NEXT: addi a0, a0, 1365 @@ -722,7 +722,7 @@ ; ; RV64-LABEL: ctpop_nxv16i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; RV64-NEXT: vsrl.vi v16, v8, 1 ; RV64-NEXT: lui a0, 349525 ; RV64-NEXT: addiw a0, a0, 1365 @@ -770,7 +770,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vlse64.v v10, (a0), zero @@ -794,7 
+794,7 @@ ; ; RV64-LABEL: ctpop_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV64-NEXT: lui a0, %hi(.LCPI18_0) ; RV64-NEXT: ld a0, %lo(.LCPI18_0)(a0) ; RV64-NEXT: lui a1, %hi(.LCPI18_1) @@ -843,7 +843,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vlse64.v v12, (a0), zero @@ -867,7 +867,7 @@ ; ; RV64-LABEL: ctpop_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV64-NEXT: lui a0, %hi(.LCPI19_0) ; RV64-NEXT: ld a0, %lo(.LCPI19_0)(a0) ; RV64-NEXT: lui a1, %hi(.LCPI19_1) @@ -916,7 +916,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vlse64.v v16, (a0), zero @@ -940,7 +940,7 @@ ; ; RV64-LABEL: ctpop_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV64-NEXT: lui a0, %hi(.LCPI20_0) ; RV64-NEXT: ld a0, %lo(.LCPI20_0)(a0) ; RV64-NEXT: lui a1, %hi(.LCPI20_1) @@ -989,7 +989,7 @@ ; RV32-NEXT: addi a0, a0, 257 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vlse64.v v24, (a0), zero @@ -1013,7 +1013,7 @@ ; ; RV64-LABEL: ctpop_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; RV64-NEXT: lui a0, %hi(.LCPI21_0) ; RV64-NEXT: ld a0, %lo(.LCPI21_0)(a0) ; RV64-NEXT: lui a1, %hi(.LCPI21_1) diff --git a/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll @@ -8,7 +8,7 @@ ; CHECK-ZVE64X-LABEL: cttz_nxv1i8: ; CHECK-ZVE64X: # %bb.0: ; CHECK-ZVE64X-NEXT: li a0, 1 -; CHECK-ZVE64X-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-ZVE64X-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-ZVE64X-NEXT: vsub.vx v9, v8, a0 ; CHECK-ZVE64X-NEXT: vnot.v v8, v8 ; CHECK-ZVE64X-NEXT: vand.vv v8, v8, v9 @@ -28,15 +28,15 @@ ; ; CHECK-D-LABEL: cttz_nxv1i8: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-D-NEXT: vrsub.vi v9, v8, 0 ; CHECK-D-NEXT: vand.vv v9, v8, v9 -; CHECK-D-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-D-NEXT: vzext.vf4 v10, v9 ; CHECK-D-NEXT: vfcvt.f.xu.v v9, v10 -; CHECK-D-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-D-NEXT: vnsrl.wi v9, v9, 23 -; CHECK-D-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; CHECK-D-NEXT: vnsrl.wi v9, v9, 0 ; CHECK-D-NEXT: li a0, 127 ; CHECK-D-NEXT: vmseq.vi v0, v8, 0 @@ -52,7 +52,7 @@ ; CHECK-ZVE64X-LABEL: cttz_nxv2i8: ; CHECK-ZVE64X: # %bb.0: ; CHECK-ZVE64X-NEXT: li a0, 1 -; CHECK-ZVE64X-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-ZVE64X-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-ZVE64X-NEXT: vsub.vx 
v9, v8, a0 ; CHECK-ZVE64X-NEXT: vnot.v v8, v8 ; CHECK-ZVE64X-NEXT: vand.vv v8, v8, v9 @@ -72,15 +72,15 @@ ; ; CHECK-D-LABEL: cttz_nxv2i8: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-D-NEXT: vrsub.vi v9, v8, 0 ; CHECK-D-NEXT: vand.vv v9, v8, v9 -; CHECK-D-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-D-NEXT: vzext.vf4 v10, v9 ; CHECK-D-NEXT: vfcvt.f.xu.v v9, v10 -; CHECK-D-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-D-NEXT: vnsrl.wi v9, v9, 23 -; CHECK-D-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-D-NEXT: vnsrl.wi v9, v9, 0 ; CHECK-D-NEXT: li a0, 127 ; CHECK-D-NEXT: vmseq.vi v0, v8, 0 @@ -96,7 +96,7 @@ ; CHECK-ZVE64X-LABEL: cttz_nxv4i8: ; CHECK-ZVE64X: # %bb.0: ; CHECK-ZVE64X-NEXT: li a0, 1 -; CHECK-ZVE64X-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-ZVE64X-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-ZVE64X-NEXT: vsub.vx v9, v8, a0 ; CHECK-ZVE64X-NEXT: vnot.v v8, v8 ; CHECK-ZVE64X-NEXT: vand.vv v8, v8, v9 @@ -116,15 +116,15 @@ ; ; CHECK-D-LABEL: cttz_nxv4i8: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-D-NEXT: vrsub.vi v9, v8, 0 ; CHECK-D-NEXT: vand.vv v9, v8, v9 -; CHECK-D-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; CHECK-D-NEXT: vzext.vf4 v10, v9 ; CHECK-D-NEXT: vfcvt.f.xu.v v10, v10 -; CHECK-D-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-D-NEXT: vnsrl.wi v9, v10, 23 -; CHECK-D-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; CHECK-D-NEXT: vnsrl.wi v9, v9, 0 ; CHECK-D-NEXT: li a0, 127 ; CHECK-D-NEXT: vmseq.vi v0, v8, 0 @@ -140,7 +140,7 @@ ; CHECK-ZVE64X-LABEL: cttz_nxv8i8: ; CHECK-ZVE64X: # %bb.0: ; CHECK-ZVE64X-NEXT: li a0, 1 -; CHECK-ZVE64X-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-ZVE64X-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-ZVE64X-NEXT: vsub.vx v9, v8, a0 ; CHECK-ZVE64X-NEXT: vnot.v v8, v8 ; CHECK-ZVE64X-NEXT: vand.vv v8, v8, v9 @@ -160,15 +160,15 @@ ; ; CHECK-D-LABEL: cttz_nxv8i8: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-D-NEXT: vrsub.vi v9, v8, 0 ; CHECK-D-NEXT: vand.vv v9, v8, v9 -; CHECK-D-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-D-NEXT: vzext.vf4 v12, v9 ; CHECK-D-NEXT: vfcvt.f.xu.v v12, v12 -; CHECK-D-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-D-NEXT: vnsrl.wi v10, v12, 23 -; CHECK-D-NEXT: vsetvli zero, zero, e8, m1, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e8, m1, ta, ma ; CHECK-D-NEXT: vnsrl.wi v9, v10, 0 ; CHECK-D-NEXT: li a0, 127 ; CHECK-D-NEXT: vmseq.vi v0, v8, 0 @@ -184,7 +184,7 @@ ; CHECK-ZVE64X-LABEL: cttz_nxv16i8: ; CHECK-ZVE64X: # %bb.0: ; CHECK-ZVE64X-NEXT: li a0, 1 -; CHECK-ZVE64X-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-ZVE64X-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-ZVE64X-NEXT: vsub.vx v10, v8, a0 ; CHECK-ZVE64X-NEXT: vnot.v v8, v8 ; CHECK-ZVE64X-NEXT: vand.vv v8, v8, v10 @@ -204,15 +204,15 @@ ; ; CHECK-D-LABEL: cttz_nxv16i8: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-D-NEXT: vsetvli a0, 
zero, e8, m2, ta, ma ; CHECK-D-NEXT: vrsub.vi v10, v8, 0 ; CHECK-D-NEXT: vand.vv v10, v8, v10 -; CHECK-D-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e32, m8, ta, ma ; CHECK-D-NEXT: vzext.vf4 v16, v10 ; CHECK-D-NEXT: vfcvt.f.xu.v v16, v16 -; CHECK-D-NEXT: vsetvli zero, zero, e16, m4, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e16, m4, ta, ma ; CHECK-D-NEXT: vnsrl.wi v12, v16, 23 -; CHECK-D-NEXT: vsetvli zero, zero, e8, m2, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e8, m2, ta, ma ; CHECK-D-NEXT: vnsrl.wi v10, v12, 0 ; CHECK-D-NEXT: li a0, 127 ; CHECK-D-NEXT: vmseq.vi v0, v8, 0 @@ -228,7 +228,7 @@ ; CHECK-LABEL: cttz_nxv32i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vsub.vx v12, v8, a0 ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: vand.vv v8, v8, v12 @@ -254,7 +254,7 @@ ; CHECK-LABEL: cttz_nxv64i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vsub.vx v16, v8, a0 ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: vand.vv v8, v8, v16 @@ -280,7 +280,7 @@ ; RV32I-LABEL: cttz_nxv1i16: ; RV32I: # %bb.0: ; RV32I-NEXT: li a0, 1 -; RV32I-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; RV32I-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; RV32I-NEXT: vsub.vx v9, v8, a0 ; RV32I-NEXT: vnot.v v8, v8 ; RV32I-NEXT: vand.vv v8, v8, v9 @@ -308,7 +308,7 @@ ; RV64I-LABEL: cttz_nxv1i16: ; RV64I: # %bb.0: ; RV64I-NEXT: li a0, 1 -; RV64I-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; RV64I-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; RV64I-NEXT: vsub.vx v9, v8, a0 ; RV64I-NEXT: vnot.v v8, v8 ; RV64I-NEXT: vand.vv v8, v8, v9 @@ -335,7 +335,7 @@ ; ; CHECK-D-LABEL: cttz_nxv1i16: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-D-NEXT: vrsub.vi v9, v8, 0 ; CHECK-D-NEXT: vand.vv v9, v8, v9 ; CHECK-D-NEXT: vfwcvt.f.xu.v v10, v9 @@ -355,7 +355,7 @@ ; RV32I-LABEL: cttz_nxv2i16: ; RV32I: # %bb.0: ; RV32I-NEXT: li a0, 1 -; RV32I-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; RV32I-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; RV32I-NEXT: vsub.vx v9, v8, a0 ; RV32I-NEXT: vnot.v v8, v8 ; RV32I-NEXT: vand.vv v8, v8, v9 @@ -383,7 +383,7 @@ ; RV64I-LABEL: cttz_nxv2i16: ; RV64I: # %bb.0: ; RV64I-NEXT: li a0, 1 -; RV64I-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; RV64I-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; RV64I-NEXT: vsub.vx v9, v8, a0 ; RV64I-NEXT: vnot.v v8, v8 ; RV64I-NEXT: vand.vv v8, v8, v9 @@ -410,7 +410,7 @@ ; ; CHECK-D-LABEL: cttz_nxv2i16: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-D-NEXT: vrsub.vi v9, v8, 0 ; CHECK-D-NEXT: vand.vv v9, v8, v9 ; CHECK-D-NEXT: vfwcvt.f.xu.v v10, v9 @@ -430,7 +430,7 @@ ; RV32I-LABEL: cttz_nxv4i16: ; RV32I: # %bb.0: ; RV32I-NEXT: li a0, 1 -; RV32I-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; RV32I-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; RV32I-NEXT: vsub.vx v9, v8, a0 ; RV32I-NEXT: vnot.v v8, v8 ; RV32I-NEXT: vand.vv v8, v8, v9 @@ -458,7 +458,7 @@ ; RV64I-LABEL: cttz_nxv4i16: ; RV64I: # %bb.0: ; RV64I-NEXT: li a0, 1 -; RV64I-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; RV64I-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; RV64I-NEXT: vsub.vx v9, v8, a0 ; RV64I-NEXT: vnot.v v8, v8 ; RV64I-NEXT: vand.vv v8, v8, v9 @@ -485,7 +485,7 @@ ; ; CHECK-D-LABEL: cttz_nxv4i16: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli 
a0, zero, e16, m1, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-D-NEXT: vrsub.vi v9, v8, 0 ; CHECK-D-NEXT: vand.vv v9, v8, v9 ; CHECK-D-NEXT: vfwcvt.f.xu.v v10, v9 @@ -505,7 +505,7 @@ ; RV32I-LABEL: cttz_nxv8i16: ; RV32I: # %bb.0: ; RV32I-NEXT: li a0, 1 -; RV32I-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; RV32I-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV32I-NEXT: vsub.vx v10, v8, a0 ; RV32I-NEXT: vnot.v v8, v8 ; RV32I-NEXT: vand.vv v8, v8, v10 @@ -533,7 +533,7 @@ ; RV64I-LABEL: cttz_nxv8i16: ; RV64I: # %bb.0: ; RV64I-NEXT: li a0, 1 -; RV64I-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; RV64I-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV64I-NEXT: vsub.vx v10, v8, a0 ; RV64I-NEXT: vnot.v v8, v8 ; RV64I-NEXT: vand.vv v8, v8, v10 @@ -560,7 +560,7 @@ ; ; CHECK-D-LABEL: cttz_nxv8i16: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-D-NEXT: vrsub.vi v10, v8, 0 ; CHECK-D-NEXT: vand.vv v10, v8, v10 ; CHECK-D-NEXT: vfwcvt.f.xu.v v12, v10 @@ -580,7 +580,7 @@ ; RV32I-LABEL: cttz_nxv16i16: ; RV32I: # %bb.0: ; RV32I-NEXT: li a0, 1 -; RV32I-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; RV32I-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; RV32I-NEXT: vsub.vx v12, v8, a0 ; RV32I-NEXT: vnot.v v8, v8 ; RV32I-NEXT: vand.vv v8, v8, v12 @@ -608,7 +608,7 @@ ; RV64I-LABEL: cttz_nxv16i16: ; RV64I: # %bb.0: ; RV64I-NEXT: li a0, 1 -; RV64I-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; RV64I-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; RV64I-NEXT: vsub.vx v12, v8, a0 ; RV64I-NEXT: vnot.v v8, v8 ; RV64I-NEXT: vand.vv v8, v8, v12 @@ -635,7 +635,7 @@ ; ; CHECK-D-LABEL: cttz_nxv16i16: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-D-NEXT: vrsub.vi v12, v8, 0 ; CHECK-D-NEXT: vand.vv v12, v8, v12 ; CHECK-D-NEXT: vfwcvt.f.xu.v v16, v12 @@ -655,7 +655,7 @@ ; RV32-LABEL: cttz_nxv32i16: ; RV32: # %bb.0: ; RV32-NEXT: li a0, 1 -; RV32-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; RV32-NEXT: vsub.vx v16, v8, a0 ; RV32-NEXT: vnot.v v8, v8 ; RV32-NEXT: vand.vv v8, v8, v16 @@ -683,7 +683,7 @@ ; RV64-LABEL: cttz_nxv32i16: ; RV64: # %bb.0: ; RV64-NEXT: li a0, 1 -; RV64-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; RV64-NEXT: vsub.vx v16, v8, a0 ; RV64-NEXT: vnot.v v8, v8 ; RV64-NEXT: vand.vv v8, v8, v16 @@ -716,7 +716,7 @@ ; RV32I-LABEL: cttz_nxv1i32: ; RV32I: # %bb.0: ; RV32I-NEXT: li a0, 1 -; RV32I-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; RV32I-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; RV32I-NEXT: vsub.vx v9, v8, a0 ; RV32I-NEXT: vnot.v v8, v8 ; RV32I-NEXT: vand.vv v8, v8, v9 @@ -745,7 +745,7 @@ ; RV64I-LABEL: cttz_nxv1i32: ; RV64I: # %bb.0: ; RV64I-NEXT: li a0, 1 -; RV64I-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; RV64I-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; RV64I-NEXT: vsub.vx v9, v8, a0 ; RV64I-NEXT: vnot.v v8, v8 ; RV64I-NEXT: vand.vv v8, v8, v9 @@ -773,14 +773,14 @@ ; ; CHECK-D-LABEL: cttz_nxv1i32: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-D-NEXT: vrsub.vi v9, v8, 0 ; CHECK-D-NEXT: vand.vv v9, v8, v9 ; CHECK-D-NEXT: vfwcvt.f.xu.v v10, v9 ; CHECK-D-NEXT: li a0, 52 -; CHECK-D-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; CHECK-D-NEXT: vsrl.vx v9, v10, a0 -; CHECK-D-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-D-NEXT: vsetvli 
zero, zero, e32, mf2, ta, ma ; CHECK-D-NEXT: vnsrl.wi v9, v9, 0 ; CHECK-D-NEXT: li a0, 1023 ; CHECK-D-NEXT: vsub.vx v9, v9, a0 @@ -797,7 +797,7 @@ ; RV32I-LABEL: cttz_nxv2i32: ; RV32I: # %bb.0: ; RV32I-NEXT: li a0, 1 -; RV32I-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV32I-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; RV32I-NEXT: vsub.vx v9, v8, a0 ; RV32I-NEXT: vnot.v v8, v8 ; RV32I-NEXT: vand.vv v8, v8, v9 @@ -826,7 +826,7 @@ ; RV64I-LABEL: cttz_nxv2i32: ; RV64I: # %bb.0: ; RV64I-NEXT: li a0, 1 -; RV64I-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV64I-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; RV64I-NEXT: vsub.vx v9, v8, a0 ; RV64I-NEXT: vnot.v v8, v8 ; RV64I-NEXT: vand.vv v8, v8, v9 @@ -854,14 +854,14 @@ ; ; CHECK-D-LABEL: cttz_nxv2i32: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-D-NEXT: vrsub.vi v9, v8, 0 ; CHECK-D-NEXT: vand.vv v9, v8, v9 ; CHECK-D-NEXT: vfwcvt.f.xu.v v10, v9 ; CHECK-D-NEXT: li a0, 52 -; CHECK-D-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; CHECK-D-NEXT: vsrl.vx v10, v10, a0 -; CHECK-D-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-D-NEXT: vnsrl.wi v9, v10, 0 ; CHECK-D-NEXT: li a0, 1023 ; CHECK-D-NEXT: vsub.vx v9, v9, a0 @@ -878,7 +878,7 @@ ; RV32I-LABEL: cttz_nxv4i32: ; RV32I: # %bb.0: ; RV32I-NEXT: li a0, 1 -; RV32I-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; RV32I-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; RV32I-NEXT: vsub.vx v10, v8, a0 ; RV32I-NEXT: vnot.v v8, v8 ; RV32I-NEXT: vand.vv v8, v8, v10 @@ -907,7 +907,7 @@ ; RV64I-LABEL: cttz_nxv4i32: ; RV64I: # %bb.0: ; RV64I-NEXT: li a0, 1 -; RV64I-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; RV64I-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; RV64I-NEXT: vsub.vx v10, v8, a0 ; RV64I-NEXT: vnot.v v8, v8 ; RV64I-NEXT: vand.vv v8, v8, v10 @@ -935,14 +935,14 @@ ; ; CHECK-D-LABEL: cttz_nxv4i32: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-D-NEXT: vrsub.vi v10, v8, 0 ; CHECK-D-NEXT: vand.vv v10, v8, v10 ; CHECK-D-NEXT: vfwcvt.f.xu.v v12, v10 ; CHECK-D-NEXT: li a0, 52 -; CHECK-D-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; CHECK-D-NEXT: vsrl.vx v12, v12, a0 -; CHECK-D-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; CHECK-D-NEXT: vnsrl.wi v10, v12, 0 ; CHECK-D-NEXT: li a0, 1023 ; CHECK-D-NEXT: vsub.vx v10, v10, a0 @@ -959,7 +959,7 @@ ; RV32I-LABEL: cttz_nxv8i32: ; RV32I: # %bb.0: ; RV32I-NEXT: li a0, 1 -; RV32I-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32I-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32I-NEXT: vsub.vx v12, v8, a0 ; RV32I-NEXT: vnot.v v8, v8 ; RV32I-NEXT: vand.vv v8, v8, v12 @@ -988,7 +988,7 @@ ; RV64I-LABEL: cttz_nxv8i32: ; RV64I: # %bb.0: ; RV64I-NEXT: li a0, 1 -; RV64I-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV64I-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV64I-NEXT: vsub.vx v12, v8, a0 ; RV64I-NEXT: vnot.v v8, v8 ; RV64I-NEXT: vand.vv v8, v8, v12 @@ -1016,14 +1016,14 @@ ; ; CHECK-D-LABEL: cttz_nxv8i32: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-D-NEXT: vrsub.vi v12, v8, 0 ; CHECK-D-NEXT: vand.vv v12, v8, v12 ; CHECK-D-NEXT: vfwcvt.f.xu.v v16, v12 ; CHECK-D-NEXT: li a0, 52 -; CHECK-D-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-D-NEXT: vsetvli zero, 
zero, e64, m8, ta, ma ; CHECK-D-NEXT: vsrl.vx v16, v16, a0 -; CHECK-D-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-D-NEXT: vnsrl.wi v12, v16, 0 ; CHECK-D-NEXT: li a0, 1023 ; CHECK-D-NEXT: vsub.vx v12, v12, a0 @@ -1040,7 +1040,7 @@ ; RV32-LABEL: cttz_nxv16i32: ; RV32: # %bb.0: ; RV32-NEXT: li a0, 1 -; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; RV32-NEXT: vsub.vx v16, v8, a0 ; RV32-NEXT: vnot.v v8, v8 ; RV32-NEXT: vand.vv v8, v8, v16 @@ -1069,7 +1069,7 @@ ; RV64-LABEL: cttz_nxv16i32: ; RV64: # %bb.0: ; RV64-NEXT: li a0, 1 -; RV64-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; RV64-NEXT: vsub.vx v16, v8, a0 ; RV64-NEXT: vnot.v v8, v8 ; RV64-NEXT: vand.vv v8, v8, v16 @@ -1121,7 +1121,7 @@ ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: li a0, 1 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vsub.vx v9, v8, a0 ; RV32-NEXT: vnot.v v8, v8 ; RV32-NEXT: addi a0, sp, 8 @@ -1149,7 +1149,7 @@ ; RV64-LABEL: cttz_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: li a0, 1 -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV64-NEXT: vsub.vx v9, v8, a0 ; RV64-NEXT: vnot.v v8, v8 ; RV64-NEXT: vand.vv v8, v8, v9 @@ -1202,7 +1202,7 @@ ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: li a0, 1 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vsub.vx v10, v8, a0 ; RV32-NEXT: vnot.v v8, v8 ; RV32-NEXT: addi a0, sp, 8 @@ -1230,7 +1230,7 @@ ; RV64-LABEL: cttz_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: li a0, 1 -; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV64-NEXT: vsub.vx v10, v8, a0 ; RV64-NEXT: vnot.v v8, v8 ; RV64-NEXT: vand.vv v8, v8, v10 @@ -1283,7 +1283,7 @@ ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: li a0, 1 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vsub.vx v12, v8, a0 ; RV32-NEXT: vnot.v v8, v8 ; RV32-NEXT: addi a0, sp, 8 @@ -1311,7 +1311,7 @@ ; RV64-LABEL: cttz_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: li a0, 1 -; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV64-NEXT: vsub.vx v12, v8, a0 ; RV64-NEXT: vnot.v v8, v8 ; RV64-NEXT: vand.vv v8, v8, v12 @@ -1364,7 +1364,7 @@ ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: li a0, 1 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vsub.vx v16, v8, a0 ; RV32-NEXT: vnot.v v8, v8 ; RV32-NEXT: addi a0, sp, 8 @@ -1392,7 +1392,7 @@ ; RV64-LABEL: cttz_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: li a0, 1 -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsub.vx v16, v8, a0 ; RV64-NEXT: vnot.v v8, v8 ; RV64-NEXT: vand.vv v8, v8, v16 @@ -1427,7 +1427,7 @@ ; CHECK-ZVE64X-LABEL: cttz_zero_undef_nxv1i8: ; CHECK-ZVE64X: # %bb.0: ; CHECK-ZVE64X-NEXT: li a0, 1 -; CHECK-ZVE64X-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-ZVE64X-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-ZVE64X-NEXT: vsub.vx v9, v8, a0 ; CHECK-ZVE64X-NEXT: vnot.v v8, v8 ; CHECK-ZVE64X-NEXT: vand.vv v8, v8, v9 @@ -1447,15 +1447,15 @@ ; ; CHECK-D-LABEL: cttz_zero_undef_nxv1i8: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; 
CHECK-D-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-D-NEXT: vrsub.vi v9, v8, 0 ; CHECK-D-NEXT: vand.vv v8, v8, v9 -; CHECK-D-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-D-NEXT: vzext.vf4 v9, v8 ; CHECK-D-NEXT: vfcvt.f.xu.v v8, v9 -; CHECK-D-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-D-NEXT: vnsrl.wi v8, v8, 23 -; CHECK-D-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; CHECK-D-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-D-NEXT: li a0, 127 ; CHECK-D-NEXT: vsub.vx v8, v8, a0 @@ -1468,7 +1468,7 @@ ; CHECK-ZVE64X-LABEL: cttz_zero_undef_nxv2i8: ; CHECK-ZVE64X: # %bb.0: ; CHECK-ZVE64X-NEXT: li a0, 1 -; CHECK-ZVE64X-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-ZVE64X-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-ZVE64X-NEXT: vsub.vx v9, v8, a0 ; CHECK-ZVE64X-NEXT: vnot.v v8, v8 ; CHECK-ZVE64X-NEXT: vand.vv v8, v8, v9 @@ -1488,15 +1488,15 @@ ; ; CHECK-D-LABEL: cttz_zero_undef_nxv2i8: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-D-NEXT: vrsub.vi v9, v8, 0 ; CHECK-D-NEXT: vand.vv v8, v8, v9 -; CHECK-D-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-D-NEXT: vzext.vf4 v9, v8 ; CHECK-D-NEXT: vfcvt.f.xu.v v8, v9 -; CHECK-D-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-D-NEXT: vnsrl.wi v8, v8, 23 -; CHECK-D-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-D-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-D-NEXT: li a0, 127 ; CHECK-D-NEXT: vsub.vx v8, v8, a0 @@ -1509,7 +1509,7 @@ ; CHECK-ZVE64X-LABEL: cttz_zero_undef_nxv4i8: ; CHECK-ZVE64X: # %bb.0: ; CHECK-ZVE64X-NEXT: li a0, 1 -; CHECK-ZVE64X-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-ZVE64X-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-ZVE64X-NEXT: vsub.vx v9, v8, a0 ; CHECK-ZVE64X-NEXT: vnot.v v8, v8 ; CHECK-ZVE64X-NEXT: vand.vv v8, v8, v9 @@ -1529,15 +1529,15 @@ ; ; CHECK-D-LABEL: cttz_zero_undef_nxv4i8: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-D-NEXT: vrsub.vi v9, v8, 0 ; CHECK-D-NEXT: vand.vv v8, v8, v9 -; CHECK-D-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; CHECK-D-NEXT: vzext.vf4 v10, v8 ; CHECK-D-NEXT: vfcvt.f.xu.v v8, v10 -; CHECK-D-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-D-NEXT: vnsrl.wi v10, v8, 23 -; CHECK-D-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; CHECK-D-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-D-NEXT: li a0, 127 ; CHECK-D-NEXT: vsub.vx v8, v8, a0 @@ -1550,7 +1550,7 @@ ; CHECK-ZVE64X-LABEL: cttz_zero_undef_nxv8i8: ; CHECK-ZVE64X: # %bb.0: ; CHECK-ZVE64X-NEXT: li a0, 1 -; CHECK-ZVE64X-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-ZVE64X-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-ZVE64X-NEXT: vsub.vx v9, v8, a0 ; CHECK-ZVE64X-NEXT: vnot.v v8, v8 ; CHECK-ZVE64X-NEXT: vand.vv v8, v8, v9 @@ -1570,15 +1570,15 @@ ; ; CHECK-D-LABEL: cttz_zero_undef_nxv8i8: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-D-NEXT: vrsub.vi v9, v8, 0 ; CHECK-D-NEXT: vand.vv v8, v8, v9 -; CHECK-D-NEXT: vsetvli zero, zero, e32, 
m4, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-D-NEXT: vzext.vf4 v12, v8 ; CHECK-D-NEXT: vfcvt.f.xu.v v8, v12 -; CHECK-D-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-D-NEXT: vnsrl.wi v12, v8, 23 -; CHECK-D-NEXT: vsetvli zero, zero, e8, m1, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e8, m1, ta, ma ; CHECK-D-NEXT: vnsrl.wi v8, v12, 0 ; CHECK-D-NEXT: li a0, 127 ; CHECK-D-NEXT: vsub.vx v8, v8, a0 @@ -1591,7 +1591,7 @@ ; CHECK-ZVE64X-LABEL: cttz_zero_undef_nxv16i8: ; CHECK-ZVE64X: # %bb.0: ; CHECK-ZVE64X-NEXT: li a0, 1 -; CHECK-ZVE64X-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-ZVE64X-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-ZVE64X-NEXT: vsub.vx v10, v8, a0 ; CHECK-ZVE64X-NEXT: vnot.v v8, v8 ; CHECK-ZVE64X-NEXT: vand.vv v8, v8, v10 @@ -1611,15 +1611,15 @@ ; ; CHECK-D-LABEL: cttz_zero_undef_nxv16i8: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-D-NEXT: vrsub.vi v10, v8, 0 ; CHECK-D-NEXT: vand.vv v8, v8, v10 -; CHECK-D-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e32, m8, ta, ma ; CHECK-D-NEXT: vzext.vf4 v16, v8 ; CHECK-D-NEXT: vfcvt.f.xu.v v8, v16 -; CHECK-D-NEXT: vsetvli zero, zero, e16, m4, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e16, m4, ta, ma ; CHECK-D-NEXT: vnsrl.wi v16, v8, 23 -; CHECK-D-NEXT: vsetvli zero, zero, e8, m2, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e8, m2, ta, ma ; CHECK-D-NEXT: vnsrl.wi v8, v16, 0 ; CHECK-D-NEXT: li a0, 127 ; CHECK-D-NEXT: vsub.vx v8, v8, a0 @@ -1632,7 +1632,7 @@ ; CHECK-LABEL: cttz_zero_undef_nxv32i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vsub.vx v12, v8, a0 ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: vand.vv v8, v8, v12 @@ -1657,7 +1657,7 @@ ; CHECK-LABEL: cttz_zero_undef_nxv64i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vsub.vx v16, v8, a0 ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: vand.vv v8, v8, v16 @@ -1682,7 +1682,7 @@ ; RV32I-LABEL: cttz_zero_undef_nxv1i16: ; RV32I: # %bb.0: ; RV32I-NEXT: li a0, 1 -; RV32I-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; RV32I-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; RV32I-NEXT: vsub.vx v9, v8, a0 ; RV32I-NEXT: vnot.v v8, v8 ; RV32I-NEXT: vand.vv v8, v8, v9 @@ -1710,7 +1710,7 @@ ; RV64I-LABEL: cttz_zero_undef_nxv1i16: ; RV64I: # %bb.0: ; RV64I-NEXT: li a0, 1 -; RV64I-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; RV64I-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; RV64I-NEXT: vsub.vx v9, v8, a0 ; RV64I-NEXT: vnot.v v8, v8 ; RV64I-NEXT: vand.vv v8, v8, v9 @@ -1737,7 +1737,7 @@ ; ; CHECK-D-LABEL: cttz_zero_undef_nxv1i16: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-D-NEXT: vrsub.vi v9, v8, 0 ; CHECK-D-NEXT: vand.vv v8, v8, v9 ; CHECK-D-NEXT: vfwcvt.f.xu.v v9, v8 @@ -1753,7 +1753,7 @@ ; RV32I-LABEL: cttz_zero_undef_nxv2i16: ; RV32I: # %bb.0: ; RV32I-NEXT: li a0, 1 -; RV32I-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; RV32I-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; RV32I-NEXT: vsub.vx v9, v8, a0 ; RV32I-NEXT: vnot.v v8, v8 ; RV32I-NEXT: vand.vv v8, v8, v9 @@ -1781,7 +1781,7 @@ ; RV64I-LABEL: cttz_zero_undef_nxv2i16: ; RV64I: # %bb.0: ; RV64I-NEXT: li a0, 1 -; RV64I-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; 
RV64I-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; RV64I-NEXT: vsub.vx v9, v8, a0 ; RV64I-NEXT: vnot.v v8, v8 ; RV64I-NEXT: vand.vv v8, v8, v9 @@ -1808,7 +1808,7 @@ ; ; CHECK-D-LABEL: cttz_zero_undef_nxv2i16: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-D-NEXT: vrsub.vi v9, v8, 0 ; CHECK-D-NEXT: vand.vv v8, v8, v9 ; CHECK-D-NEXT: vfwcvt.f.xu.v v9, v8 @@ -1824,7 +1824,7 @@ ; RV32I-LABEL: cttz_zero_undef_nxv4i16: ; RV32I: # %bb.0: ; RV32I-NEXT: li a0, 1 -; RV32I-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; RV32I-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; RV32I-NEXT: vsub.vx v9, v8, a0 ; RV32I-NEXT: vnot.v v8, v8 ; RV32I-NEXT: vand.vv v8, v8, v9 @@ -1852,7 +1852,7 @@ ; RV64I-LABEL: cttz_zero_undef_nxv4i16: ; RV64I: # %bb.0: ; RV64I-NEXT: li a0, 1 -; RV64I-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; RV64I-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; RV64I-NEXT: vsub.vx v9, v8, a0 ; RV64I-NEXT: vnot.v v8, v8 ; RV64I-NEXT: vand.vv v8, v8, v9 @@ -1879,7 +1879,7 @@ ; ; CHECK-D-LABEL: cttz_zero_undef_nxv4i16: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-D-NEXT: vrsub.vi v9, v8, 0 ; CHECK-D-NEXT: vand.vv v8, v8, v9 ; CHECK-D-NEXT: vfwcvt.f.xu.v v10, v8 @@ -1895,7 +1895,7 @@ ; RV32I-LABEL: cttz_zero_undef_nxv8i16: ; RV32I: # %bb.0: ; RV32I-NEXT: li a0, 1 -; RV32I-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; RV32I-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV32I-NEXT: vsub.vx v10, v8, a0 ; RV32I-NEXT: vnot.v v8, v8 ; RV32I-NEXT: vand.vv v8, v8, v10 @@ -1923,7 +1923,7 @@ ; RV64I-LABEL: cttz_zero_undef_nxv8i16: ; RV64I: # %bb.0: ; RV64I-NEXT: li a0, 1 -; RV64I-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; RV64I-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV64I-NEXT: vsub.vx v10, v8, a0 ; RV64I-NEXT: vnot.v v8, v8 ; RV64I-NEXT: vand.vv v8, v8, v10 @@ -1950,7 +1950,7 @@ ; ; CHECK-D-LABEL: cttz_zero_undef_nxv8i16: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-D-NEXT: vrsub.vi v10, v8, 0 ; CHECK-D-NEXT: vand.vv v8, v8, v10 ; CHECK-D-NEXT: vfwcvt.f.xu.v v12, v8 @@ -1966,7 +1966,7 @@ ; RV32I-LABEL: cttz_zero_undef_nxv16i16: ; RV32I: # %bb.0: ; RV32I-NEXT: li a0, 1 -; RV32I-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; RV32I-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; RV32I-NEXT: vsub.vx v12, v8, a0 ; RV32I-NEXT: vnot.v v8, v8 ; RV32I-NEXT: vand.vv v8, v8, v12 @@ -1994,7 +1994,7 @@ ; RV64I-LABEL: cttz_zero_undef_nxv16i16: ; RV64I: # %bb.0: ; RV64I-NEXT: li a0, 1 -; RV64I-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; RV64I-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; RV64I-NEXT: vsub.vx v12, v8, a0 ; RV64I-NEXT: vnot.v v8, v8 ; RV64I-NEXT: vand.vv v8, v8, v12 @@ -2021,7 +2021,7 @@ ; ; CHECK-D-LABEL: cttz_zero_undef_nxv16i16: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-D-NEXT: vrsub.vi v12, v8, 0 ; CHECK-D-NEXT: vand.vv v8, v8, v12 ; CHECK-D-NEXT: vfwcvt.f.xu.v v16, v8 @@ -2037,7 +2037,7 @@ ; RV32-LABEL: cttz_zero_undef_nxv32i16: ; RV32: # %bb.0: ; RV32-NEXT: li a0, 1 -; RV32-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; RV32-NEXT: vsub.vx v16, v8, a0 ; RV32-NEXT: vnot.v v8, v8 ; RV32-NEXT: vand.vv v8, v8, v16 @@ -2065,7 +2065,7 @@ ; RV64-LABEL: cttz_zero_undef_nxv32i16: ; RV64: # %bb.0: ; RV64-NEXT: li a0, 1 -; RV64-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; 
RV64-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; RV64-NEXT: vsub.vx v16, v8, a0 ; RV64-NEXT: vnot.v v8, v8 ; RV64-NEXT: vand.vv v8, v8, v16 @@ -2097,7 +2097,7 @@ ; RV32I-LABEL: cttz_zero_undef_nxv1i32: ; RV32I: # %bb.0: ; RV32I-NEXT: li a0, 1 -; RV32I-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; RV32I-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; RV32I-NEXT: vsub.vx v9, v8, a0 ; RV32I-NEXT: vnot.v v8, v8 ; RV32I-NEXT: vand.vv v8, v8, v9 @@ -2126,7 +2126,7 @@ ; RV64I-LABEL: cttz_zero_undef_nxv1i32: ; RV64I: # %bb.0: ; RV64I-NEXT: li a0, 1 -; RV64I-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; RV64I-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; RV64I-NEXT: vsub.vx v9, v8, a0 ; RV64I-NEXT: vnot.v v8, v8 ; RV64I-NEXT: vand.vv v8, v8, v9 @@ -2154,14 +2154,14 @@ ; ; CHECK-D-LABEL: cttz_zero_undef_nxv1i32: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-D-NEXT: vrsub.vi v9, v8, 0 ; CHECK-D-NEXT: vand.vv v8, v8, v9 ; CHECK-D-NEXT: vfwcvt.f.xu.v v9, v8 ; CHECK-D-NEXT: li a0, 52 -; CHECK-D-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; CHECK-D-NEXT: vsrl.vx v8, v9, a0 -; CHECK-D-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-D-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-D-NEXT: li a0, 1023 ; CHECK-D-NEXT: vsub.vx v8, v8, a0 @@ -2174,7 +2174,7 @@ ; RV32I-LABEL: cttz_zero_undef_nxv2i32: ; RV32I: # %bb.0: ; RV32I-NEXT: li a0, 1 -; RV32I-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV32I-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; RV32I-NEXT: vsub.vx v9, v8, a0 ; RV32I-NEXT: vnot.v v8, v8 ; RV32I-NEXT: vand.vv v8, v8, v9 @@ -2203,7 +2203,7 @@ ; RV64I-LABEL: cttz_zero_undef_nxv2i32: ; RV64I: # %bb.0: ; RV64I-NEXT: li a0, 1 -; RV64I-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV64I-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; RV64I-NEXT: vsub.vx v9, v8, a0 ; RV64I-NEXT: vnot.v v8, v8 ; RV64I-NEXT: vand.vv v8, v8, v9 @@ -2231,14 +2231,14 @@ ; ; CHECK-D-LABEL: cttz_zero_undef_nxv2i32: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-D-NEXT: vrsub.vi v9, v8, 0 ; CHECK-D-NEXT: vand.vv v8, v8, v9 ; CHECK-D-NEXT: vfwcvt.f.xu.v v10, v8 ; CHECK-D-NEXT: li a0, 52 -; CHECK-D-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; CHECK-D-NEXT: vsrl.vx v8, v10, a0 -; CHECK-D-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-D-NEXT: vnsrl.wi v10, v8, 0 ; CHECK-D-NEXT: li a0, 1023 ; CHECK-D-NEXT: vsub.vx v8, v10, a0 @@ -2251,7 +2251,7 @@ ; RV32I-LABEL: cttz_zero_undef_nxv4i32: ; RV32I: # %bb.0: ; RV32I-NEXT: li a0, 1 -; RV32I-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; RV32I-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; RV32I-NEXT: vsub.vx v10, v8, a0 ; RV32I-NEXT: vnot.v v8, v8 ; RV32I-NEXT: vand.vv v8, v8, v10 @@ -2280,7 +2280,7 @@ ; RV64I-LABEL: cttz_zero_undef_nxv4i32: ; RV64I: # %bb.0: ; RV64I-NEXT: li a0, 1 -; RV64I-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; RV64I-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; RV64I-NEXT: vsub.vx v10, v8, a0 ; RV64I-NEXT: vnot.v v8, v8 ; RV64I-NEXT: vand.vv v8, v8, v10 @@ -2308,14 +2308,14 @@ ; ; CHECK-D-LABEL: cttz_zero_undef_nxv4i32: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-D-NEXT: vrsub.vi v10, v8, 0 ; CHECK-D-NEXT: vand.vv v8, v8, v10 ; CHECK-D-NEXT: vfwcvt.f.xu.v 
v12, v8 ; CHECK-D-NEXT: li a0, 52 -; CHECK-D-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; CHECK-D-NEXT: vsrl.vx v8, v12, a0 -; CHECK-D-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; CHECK-D-NEXT: vnsrl.wi v12, v8, 0 ; CHECK-D-NEXT: li a0, 1023 ; CHECK-D-NEXT: vsub.vx v8, v12, a0 @@ -2328,7 +2328,7 @@ ; RV32I-LABEL: cttz_zero_undef_nxv8i32: ; RV32I: # %bb.0: ; RV32I-NEXT: li a0, 1 -; RV32I-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32I-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32I-NEXT: vsub.vx v12, v8, a0 ; RV32I-NEXT: vnot.v v8, v8 ; RV32I-NEXT: vand.vv v8, v8, v12 @@ -2357,7 +2357,7 @@ ; RV64I-LABEL: cttz_zero_undef_nxv8i32: ; RV64I: # %bb.0: ; RV64I-NEXT: li a0, 1 -; RV64I-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV64I-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV64I-NEXT: vsub.vx v12, v8, a0 ; RV64I-NEXT: vnot.v v8, v8 ; RV64I-NEXT: vand.vv v8, v8, v12 @@ -2385,14 +2385,14 @@ ; ; CHECK-D-LABEL: cttz_zero_undef_nxv8i32: ; CHECK-D: # %bb.0: -; CHECK-D-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-D-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-D-NEXT: vrsub.vi v12, v8, 0 ; CHECK-D-NEXT: vand.vv v8, v8, v12 ; CHECK-D-NEXT: vfwcvt.f.xu.v v16, v8 ; CHECK-D-NEXT: li a0, 52 -; CHECK-D-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; CHECK-D-NEXT: vsrl.vx v8, v16, a0 -; CHECK-D-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-D-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-D-NEXT: vnsrl.wi v16, v8, 0 ; CHECK-D-NEXT: li a0, 1023 ; CHECK-D-NEXT: vsub.vx v8, v16, a0 @@ -2405,7 +2405,7 @@ ; RV32-LABEL: cttz_zero_undef_nxv16i32: ; RV32: # %bb.0: ; RV32-NEXT: li a0, 1 -; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; RV32-NEXT: vsub.vx v16, v8, a0 ; RV32-NEXT: vnot.v v8, v8 ; RV32-NEXT: vand.vv v8, v8, v16 @@ -2434,7 +2434,7 @@ ; RV64-LABEL: cttz_zero_undef_nxv16i32: ; RV64: # %bb.0: ; RV64-NEXT: li a0, 1 -; RV64-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; RV64-NEXT: vsub.vx v16, v8, a0 ; RV64-NEXT: vnot.v v8, v8 ; RV64-NEXT: vand.vv v8, v8, v16 @@ -2485,7 +2485,7 @@ ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: li a0, 1 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vsub.vx v9, v8, a0 ; RV32-NEXT: vnot.v v8, v8 ; RV32-NEXT: addi a0, sp, 8 @@ -2513,7 +2513,7 @@ ; RV64-LABEL: cttz_zero_undef_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: li a0, 1 -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV64-NEXT: vsub.vx v9, v8, a0 ; RV64-NEXT: vnot.v v8, v8 ; RV64-NEXT: vand.vv v8, v8, v9 @@ -2565,7 +2565,7 @@ ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: li a0, 1 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vsub.vx v10, v8, a0 ; RV32-NEXT: vnot.v v8, v8 ; RV32-NEXT: addi a0, sp, 8 @@ -2593,7 +2593,7 @@ ; RV64-LABEL: cttz_zero_undef_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: li a0, 1 -; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV64-NEXT: vsub.vx v10, v8, a0 ; RV64-NEXT: vnot.v v8, v8 ; RV64-NEXT: vand.vv v8, v8, v10 @@ -2645,7 +2645,7 @@ ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: li a0, 1 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, 
ta, ma ; RV32-NEXT: vsub.vx v12, v8, a0 ; RV32-NEXT: vnot.v v8, v8 ; RV32-NEXT: addi a0, sp, 8 @@ -2673,7 +2673,7 @@ ; RV64-LABEL: cttz_zero_undef_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: li a0, 1 -; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV64-NEXT: vsub.vx v12, v8, a0 ; RV64-NEXT: vnot.v v8, v8 ; RV64-NEXT: vand.vv v8, v8, v12 @@ -2725,7 +2725,7 @@ ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: li a0, 1 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vsub.vx v16, v8, a0 ; RV32-NEXT: vnot.v v8, v8 ; RV32-NEXT: addi a0, sp, 8 @@ -2753,7 +2753,7 @@ ; RV64-LABEL: cttz_zero_undef_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: li a0, 1 -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsub.vx v16, v8, a0 ; RV64-NEXT: vnot.v v8, v8 ; RV64-NEXT: vand.vv v8, v8, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll b/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll --- a/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll +++ b/llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll @@ -5,7 +5,7 @@ define @sextload_nxv1i1_nxv1i8(* %x) { ; CHECK-LABEL: sextload_nxv1i1_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 @@ -18,7 +18,7 @@ define @sextload_nxv1i8_nxv1i16(* %x) { ; CHECK-LABEL: sextload_nxv1i8_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vsext.vf2 v8, v9 ; CHECK-NEXT: ret @@ -30,7 +30,7 @@ define @zextload_nxv1i8_nxv1i16(* %x) { ; CHECK-LABEL: zextload_nxv1i8_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vzext.vf2 v8, v9 ; CHECK-NEXT: ret @@ -42,7 +42,7 @@ define @sextload_nxv1i8_nxv1i32(* %x) { ; CHECK-LABEL: sextload_nxv1i8_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vsext.vf4 v8, v9 ; CHECK-NEXT: ret @@ -54,7 +54,7 @@ define @zextload_nxv1i8_nxv1i32(* %x) { ; CHECK-LABEL: zextload_nxv1i8_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vzext.vf4 v8, v9 ; CHECK-NEXT: ret @@ -66,7 +66,7 @@ define @sextload_nxv1i8_nxv1i64(* %x) { ; CHECK-LABEL: sextload_nxv1i8_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vsext.vf8 v8, v9 ; CHECK-NEXT: ret @@ -78,7 +78,7 @@ define @zextload_nxv1i8_nxv1i64(* %x) { ; CHECK-LABEL: zextload_nxv1i8_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vzext.vf8 v8, v9 ; CHECK-NEXT: ret @@ -90,7 +90,7 @@ define @sextload_nxv2i8_nxv2i16(* %x) { ; CHECK-LABEL: sextload_nxv2i8_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vsext.vf2 v8, v9 ; CHECK-NEXT: ret @@ 
-102,7 +102,7 @@ define @zextload_nxv2i8_nxv2i16(* %x) { ; CHECK-LABEL: zextload_nxv2i8_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vzext.vf2 v8, v9 ; CHECK-NEXT: ret @@ -114,7 +114,7 @@ define @sextload_nxv2i8_nxv2i32(* %x) { ; CHECK-LABEL: sextload_nxv2i8_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vsext.vf4 v8, v9 ; CHECK-NEXT: ret @@ -126,7 +126,7 @@ define @zextload_nxv2i8_nxv2i32(* %x) { ; CHECK-LABEL: zextload_nxv2i8_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vzext.vf4 v8, v9 ; CHECK-NEXT: ret @@ -138,7 +138,7 @@ define @sextload_nxv2i8_nxv2i64(* %x) { ; CHECK-LABEL: sextload_nxv2i8_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; CHECK-NEXT: vle8.v v10, (a0) ; CHECK-NEXT: vsext.vf8 v8, v10 ; CHECK-NEXT: ret @@ -150,7 +150,7 @@ define @zextload_nxv2i8_nxv2i64(* %x) { ; CHECK-LABEL: zextload_nxv2i8_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; CHECK-NEXT: vle8.v v10, (a0) ; CHECK-NEXT: vzext.vf8 v8, v10 ; CHECK-NEXT: ret @@ -162,7 +162,7 @@ define @sextload_nxv4i8_nxv4i16(* %x) { ; CHECK-LABEL: sextload_nxv4i8_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vsext.vf2 v8, v9 ; CHECK-NEXT: ret @@ -174,7 +174,7 @@ define @zextload_nxv4i8_nxv4i16(* %x) { ; CHECK-LABEL: zextload_nxv4i8_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vzext.vf2 v8, v9 ; CHECK-NEXT: ret @@ -186,7 +186,7 @@ define @sextload_nxv4i8_nxv4i32(* %x) { ; CHECK-LABEL: sextload_nxv4i8_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vle8.v v10, (a0) ; CHECK-NEXT: vsext.vf4 v8, v10 ; CHECK-NEXT: ret @@ -198,7 +198,7 @@ define @zextload_nxv4i8_nxv4i32(* %x) { ; CHECK-LABEL: zextload_nxv4i8_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vle8.v v10, (a0) ; CHECK-NEXT: vzext.vf4 v8, v10 ; CHECK-NEXT: ret @@ -210,7 +210,7 @@ define @sextload_nxv4i8_nxv4i64(* %x) { ; CHECK-LABEL: sextload_nxv4i8_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; CHECK-NEXT: vle8.v v12, (a0) ; CHECK-NEXT: vsext.vf8 v8, v12 ; CHECK-NEXT: ret @@ -222,7 +222,7 @@ define @zextload_nxv4i8_nxv4i64(* %x) { ; CHECK-LABEL: zextload_nxv4i8_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; CHECK-NEXT: vle8.v v12, (a0) ; CHECK-NEXT: vzext.vf8 v8, v12 ; CHECK-NEXT: ret @@ -235,7 +235,7 @@ ; CHECK-LABEL: sextload_nxv8i8_nxv8i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vl1r.v v10, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vsext.vf2 v8, v10 ; CHECK-NEXT: ret %y = load , * %x @@ -247,7 +247,7 @@ ; 
CHECK-LABEL: zextload_nxv8i8_nxv8i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vl1r.v v10, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vzext.vf2 v8, v10 ; CHECK-NEXT: ret %y = load , * %x @@ -259,7 +259,7 @@ ; CHECK-LABEL: sextload_nxv8i8_nxv8i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vl1r.v v12, (a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vsext.vf4 v8, v12 ; CHECK-NEXT: ret %y = load , * %x @@ -271,7 +271,7 @@ ; CHECK-LABEL: zextload_nxv8i8_nxv8i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vl1r.v v12, (a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vzext.vf4 v8, v12 ; CHECK-NEXT: ret %y = load , * %x @@ -283,7 +283,7 @@ ; CHECK-LABEL: sextload_nxv8i8_nxv8i64: ; CHECK: # %bb.0: ; CHECK-NEXT: vl1r.v v16, (a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vsext.vf8 v8, v16 ; CHECK-NEXT: ret %y = load , * %x @@ -295,7 +295,7 @@ ; CHECK-LABEL: zextload_nxv8i8_nxv8i64: ; CHECK: # %bb.0: ; CHECK-NEXT: vl1r.v v16, (a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vzext.vf8 v8, v16 ; CHECK-NEXT: ret %y = load , * %x @@ -307,7 +307,7 @@ ; CHECK-LABEL: sextload_nxv16i8_nxv16i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vl2r.v v12, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vsext.vf2 v8, v12 ; CHECK-NEXT: ret %y = load , * %x @@ -319,7 +319,7 @@ ; CHECK-LABEL: zextload_nxv16i8_nxv16i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vl2r.v v12, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vzext.vf2 v8, v12 ; CHECK-NEXT: ret %y = load , * %x @@ -331,7 +331,7 @@ ; CHECK-LABEL: sextload_nxv16i8_nxv16i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vl2r.v v16, (a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vsext.vf4 v8, v16 ; CHECK-NEXT: ret %y = load , * %x @@ -343,7 +343,7 @@ ; CHECK-LABEL: zextload_nxv16i8_nxv16i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vl2r.v v16, (a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vzext.vf4 v8, v16 ; CHECK-NEXT: ret %y = load , * %x @@ -355,7 +355,7 @@ ; CHECK-LABEL: sextload_nxv32i8_nxv32i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vl4r.v v16, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vsext.vf2 v8, v16 ; CHECK-NEXT: ret %y = load , * %x @@ -367,7 +367,7 @@ ; CHECK-LABEL: zextload_nxv32i8_nxv32i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vl4r.v v16, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vzext.vf2 v8, v16 ; CHECK-NEXT: ret %y = load , * %x @@ -378,7 +378,7 @@ define void @truncstore_nxv1i8_nxv1i1( %x, *%z) { ; CHECK-LABEL: truncstore_nxv1i8_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v8, v8, 0 ; CHECK-NEXT: vsm.v v8, (a0) @@ -391,7 +391,7 @@ define void @truncstore_nxv1i16_nxv1i8( %x, * %z) { ; CHECK-LABEL: truncstore_nxv1i16_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma 
; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret @@ -403,7 +403,7 @@ define @sextload_nxv1i16_nxv1i32(* %x) { ; CHECK-LABEL: sextload_nxv1i16_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vsext.vf2 v8, v9 ; CHECK-NEXT: ret @@ -415,7 +415,7 @@ define @zextload_nxv1i16_nxv1i32(* %x) { ; CHECK-LABEL: zextload_nxv1i16_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vzext.vf2 v8, v9 ; CHECK-NEXT: ret @@ -427,7 +427,7 @@ define @sextload_nxv1i16_nxv1i64(* %x) { ; CHECK-LABEL: sextload_nxv1i16_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vsext.vf4 v8, v9 ; CHECK-NEXT: ret @@ -439,7 +439,7 @@ define @zextload_nxv1i16_nxv1i64(* %x) { ; CHECK-LABEL: zextload_nxv1i16_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vzext.vf4 v8, v9 ; CHECK-NEXT: ret @@ -451,7 +451,7 @@ define void @truncstore_nxv2i16_nxv2i8( %x, * %z) { ; CHECK-LABEL: truncstore_nxv2i16_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret @@ -463,7 +463,7 @@ define @sextload_nxv2i16_nxv2i32(* %x) { ; CHECK-LABEL: sextload_nxv2i16_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vsext.vf2 v8, v9 ; CHECK-NEXT: ret @@ -475,7 +475,7 @@ define @zextload_nxv2i16_nxv2i32(* %x) { ; CHECK-LABEL: zextload_nxv2i16_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vzext.vf2 v8, v9 ; CHECK-NEXT: ret @@ -487,7 +487,7 @@ define @sextload_nxv2i16_nxv2i64(* %x) { ; CHECK-LABEL: sextload_nxv2i16_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; CHECK-NEXT: vle16.v v10, (a0) ; CHECK-NEXT: vsext.vf4 v8, v10 ; CHECK-NEXT: ret @@ -499,7 +499,7 @@ define @zextload_nxv2i16_nxv2i64(* %x) { ; CHECK-LABEL: zextload_nxv2i16_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; CHECK-NEXT: vle16.v v10, (a0) ; CHECK-NEXT: vzext.vf4 v8, v10 ; CHECK-NEXT: ret @@ -511,7 +511,7 @@ define void @truncstore_nxv4i16_nxv4i8( %x, * %z) { ; CHECK-LABEL: truncstore_nxv4i16_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret @@ -524,7 +524,7 @@ ; CHECK-LABEL: sextload_nxv4i16_nxv4i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vl1re16.v v10, (a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vsext.vf2 v8, v10 ; CHECK-NEXT: ret %y = load , * %x @@ -536,7 +536,7 @@ ; CHECK-LABEL: zextload_nxv4i16_nxv4i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vl1re16.v v10, (a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: 
vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vzext.vf2 v8, v10 ; CHECK-NEXT: ret %y = load , * %x @@ -548,7 +548,7 @@ ; CHECK-LABEL: sextload_nxv4i16_nxv4i64: ; CHECK: # %bb.0: ; CHECK-NEXT: vl1re16.v v12, (a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vsext.vf4 v8, v12 ; CHECK-NEXT: ret %y = load , * %x @@ -560,7 +560,7 @@ ; CHECK-LABEL: zextload_nxv4i16_nxv4i64: ; CHECK: # %bb.0: ; CHECK-NEXT: vl1re16.v v12, (a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vzext.vf4 v8, v12 ; CHECK-NEXT: ret %y = load , * %x @@ -571,7 +571,7 @@ define void @truncstore_nxv8i16_nxv8i8( %x, * %z) { ; CHECK-LABEL: truncstore_nxv8i16_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v10, v8, 0 ; CHECK-NEXT: vs1r.v v10, (a0) ; CHECK-NEXT: ret @@ -584,7 +584,7 @@ ; CHECK-LABEL: sextload_nxv8i16_nxv8i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vl2re16.v v12, (a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vsext.vf2 v8, v12 ; CHECK-NEXT: ret %y = load , * %x @@ -596,7 +596,7 @@ ; CHECK-LABEL: zextload_nxv8i16_nxv8i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vl2re16.v v12, (a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vzext.vf2 v8, v12 ; CHECK-NEXT: ret %y = load , * %x @@ -608,7 +608,7 @@ ; CHECK-LABEL: sextload_nxv8i16_nxv8i64: ; CHECK: # %bb.0: ; CHECK-NEXT: vl2re16.v v16, (a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vsext.vf4 v8, v16 ; CHECK-NEXT: ret %y = load , * %x @@ -620,7 +620,7 @@ ; CHECK-LABEL: zextload_nxv8i16_nxv8i64: ; CHECK: # %bb.0: ; CHECK-NEXT: vl2re16.v v16, (a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vzext.vf4 v8, v16 ; CHECK-NEXT: ret %y = load , * %x @@ -631,7 +631,7 @@ define void @truncstore_nxv16i16_nxv16i8( %x, * %z) { ; CHECK-LABEL: truncstore_nxv16i16_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vnsrl.wi v12, v8, 0 ; CHECK-NEXT: vs2r.v v12, (a0) ; CHECK-NEXT: ret @@ -644,7 +644,7 @@ ; CHECK-LABEL: sextload_nxv16i16_nxv16i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vl4re16.v v16, (a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vsext.vf2 v8, v16 ; CHECK-NEXT: ret %y = load , * %x @@ -656,7 +656,7 @@ ; CHECK-LABEL: zextload_nxv16i16_nxv16i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vl4re16.v v16, (a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vzext.vf2 v8, v16 ; CHECK-NEXT: ret %y = load , * %x @@ -667,7 +667,7 @@ define void @truncstore_nxv32i16_nxv32i8( %x, * %z) { ; CHECK-LABEL: truncstore_nxv32i16_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vnsrl.wi v16, v8, 0 ; CHECK-NEXT: vs4r.v v16, (a0) ; CHECK-NEXT: ret @@ -679,9 +679,9 @@ define void @truncstore_nxv1i32_nxv1i8( %x, * %z) { ; CHECK-LABEL: truncstore_nxv1i32_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 -; CHECK-NEXT: 
vsetvli zero, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret @@ -693,7 +693,7 @@ define void @truncstore_nxv1i32_nxv1i16( %x, * %z) { ; CHECK-LABEL: truncstore_nxv1i32_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret @@ -705,7 +705,7 @@ define @sextload_nxv1i32_nxv1i64(* %x) { ; CHECK-LABEL: sextload_nxv1i32_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vsext.vf2 v8, v9 ; CHECK-NEXT: ret @@ -717,7 +717,7 @@ define @zextload_nxv1i32_nxv1i64(* %x) { ; CHECK-LABEL: zextload_nxv1i32_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vzext.vf2 v8, v9 ; CHECK-NEXT: ret @@ -729,9 +729,9 @@ define void @truncstore_nxv2i32_nxv2i8( %x, * %z) { ; CHECK-LABEL: truncstore_nxv2i32_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret @@ -743,7 +743,7 @@ define void @truncstore_nxv2i32_nxv2i16( %x, * %z) { ; CHECK-LABEL: truncstore_nxv2i32_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret @@ -756,7 +756,7 @@ ; CHECK-LABEL: sextload_nxv2i32_nxv2i64: ; CHECK: # %bb.0: ; CHECK-NEXT: vl1re32.v v10, (a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vsext.vf2 v8, v10 ; CHECK-NEXT: ret %y = load , * %x @@ -768,7 +768,7 @@ ; CHECK-LABEL: zextload_nxv2i32_nxv2i64: ; CHECK: # %bb.0: ; CHECK-NEXT: vl1re32.v v10, (a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vzext.vf2 v8, v10 ; CHECK-NEXT: ret %y = load , * %x @@ -779,9 +779,9 @@ define void @truncstore_nxv4i32_nxv4i8( %x, * %z) { ; CHECK-LABEL: truncstore_nxv4i32_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v10, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret @@ -793,7 +793,7 @@ define void @truncstore_nxv4i32_nxv4i16( %x, * %z) { ; CHECK-LABEL: truncstore_nxv4i32_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v10, v8, 0 ; CHECK-NEXT: vs1r.v v10, (a0) ; CHECK-NEXT: ret @@ -806,7 +806,7 @@ ; CHECK-LABEL: sextload_nxv4i32_nxv4i64: ; CHECK: # %bb.0: ; CHECK-NEXT: vl2re32.v v12, (a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vsext.vf2 v8, v12 ; CHECK-NEXT: ret %y = load , * %x @@ -818,7 +818,7 @@ ; CHECK-LABEL: zextload_nxv4i32_nxv4i64: ; CHECK: # %bb.0: ; CHECK-NEXT: vl2re32.v v12, (a0) 
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vzext.vf2 v8, v12 ; CHECK-NEXT: ret %y = load , * %x @@ -829,9 +829,9 @@ define void @truncstore_nxv8i32_nxv8i8( %x, * %z) { ; CHECK-LABEL: truncstore_nxv8i32_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vnsrl.wi v12, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v12, 0 ; CHECK-NEXT: vs1r.v v8, (a0) ; CHECK-NEXT: ret @@ -843,7 +843,7 @@ define void @truncstore_nxv8i32_nxv8i16( %x, * %z) { ; CHECK-LABEL: truncstore_nxv8i32_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vnsrl.wi v12, v8, 0 ; CHECK-NEXT: vs2r.v v12, (a0) ; CHECK-NEXT: ret @@ -856,7 +856,7 @@ ; CHECK-LABEL: sextload_nxv8i32_nxv8i64: ; CHECK: # %bb.0: ; CHECK-NEXT: vl4re32.v v16, (a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vsext.vf2 v8, v16 ; CHECK-NEXT: ret %y = load , * %x @@ -868,7 +868,7 @@ ; CHECK-LABEL: zextload_nxv8i32_nxv8i64: ; CHECK: # %bb.0: ; CHECK-NEXT: vl4re32.v v16, (a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vzext.vf2 v8, v16 ; CHECK-NEXT: ret %y = load , * %x @@ -879,9 +879,9 @@ define void @truncstore_nxv16i32_nxv16i8( %x, * %z) { ; CHECK-LABEL: truncstore_nxv16i32_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vnsrl.wi v16, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v16, 0 ; CHECK-NEXT: vs2r.v v8, (a0) ; CHECK-NEXT: ret @@ -893,7 +893,7 @@ define void @truncstore_nxv16i32_nxv16i16( %x, * %z) { ; CHECK-LABEL: truncstore_nxv16i32_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vnsrl.wi v16, v8, 0 ; CHECK-NEXT: vs4r.v v16, (a0) ; CHECK-NEXT: ret @@ -905,11 +905,11 @@ define void @truncstore_nxv1i64_nxv1i8( %x, * %z) { ; CHECK-LABEL: truncstore_nxv1i64_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret @@ -921,9 +921,9 @@ define void @truncstore_nxv1i64_nxv1i16( %x, * %z) { ; CHECK-LABEL: truncstore_nxv1i64_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret @@ -935,7 +935,7 @@ define void @truncstore_nxv1i64_nxv1i32( %x, * %z) { ; CHECK-LABEL: truncstore_nxv1i64_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, 
v8, 0 ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret @@ -947,11 +947,11 @@ define void @truncstore_nxv2i64_nxv2i8( %x, * %z) { ; CHECK-LABEL: truncstore_nxv2i64_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v10, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret @@ -963,9 +963,9 @@ define void @truncstore_nxv2i64_nxv2i16( %x, * %z) { ; CHECK-LABEL: truncstore_nxv2i64_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v10, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret @@ -977,7 +977,7 @@ define void @truncstore_nxv2i64_nxv2i32( %x, * %z) { ; CHECK-LABEL: truncstore_nxv2i64_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v10, v8, 0 ; CHECK-NEXT: vs1r.v v10, (a0) ; CHECK-NEXT: ret @@ -989,11 +989,11 @@ define void @truncstore_nxv4i64_nxv4i8( %x, * %z) { ; CHECK-LABEL: truncstore_nxv4i64_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vnsrl.wi v12, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v12, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret @@ -1005,9 +1005,9 @@ define void @truncstore_nxv4i64_nxv4i16( %x, * %z) { ; CHECK-LABEL: truncstore_nxv4i64_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vnsrl.wi v12, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v12, 0 ; CHECK-NEXT: vs1r.v v8, (a0) ; CHECK-NEXT: ret @@ -1019,7 +1019,7 @@ define void @truncstore_nxv4i64_nxv4i32( %x, * %z) { ; CHECK-LABEL: truncstore_nxv4i64_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vnsrl.wi v12, v8, 0 ; CHECK-NEXT: vs2r.v v12, (a0) ; CHECK-NEXT: ret @@ -1031,11 +1031,11 @@ define void @truncstore_nxv8i64_nxv8i8( %x, * %z) { ; CHECK-LABEL: truncstore_nxv8i64_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vnsrl.wi v16, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v16, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v10, v8, 0 ; CHECK-NEXT: vs1r.v v10, (a0) ; CHECK-NEXT: ret @@ -1047,9 +1047,9 @@ define void @truncstore_nxv8i64_nxv8i16( %x, * %z) { ; CHECK-LABEL: truncstore_nxv8i64_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: 
vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vnsrl.wi v16, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v16, 0 ; CHECK-NEXT: vs2r.v v8, (a0) ; CHECK-NEXT: ret @@ -1061,7 +1061,7 @@ define void @truncstore_nxv8i64_nxv8i32( %x, * %z) { ; CHECK-LABEL: truncstore_nxv8i64_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vnsrl.wi v16, v8, 0 ; CHECK-NEXT: vs4r.v v16, (a0) ; CHECK-NEXT: ret @@ -1073,7 +1073,7 @@ define @extload_nxv1f16_nxv1f32(* %x) { ; CHECK-LABEL: extload_nxv1f16_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vfwcvt.f.f.v v8, v9 ; CHECK-NEXT: ret @@ -1085,10 +1085,10 @@ define @extload_nxv1f16_nxv1f64(* %x) { ; CHECK-LABEL: extload_nxv1f16_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfwcvt.f.f.v v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v8, v9 ; CHECK-NEXT: ret %y = load , * %x @@ -1099,7 +1099,7 @@ define @extload_nxv2f16_nxv2f32(* %x) { ; CHECK-LABEL: extload_nxv2f16_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vfwcvt.f.f.v v8, v9 ; CHECK-NEXT: ret @@ -1111,10 +1111,10 @@ define @extload_nxv2f16_nxv2f64(* %x) { ; CHECK-LABEL: extload_nxv2f16_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfwcvt.f.f.v v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v8, v10 ; CHECK-NEXT: ret %y = load , * %x @@ -1126,7 +1126,7 @@ ; CHECK-LABEL: extload_nxv4f16_nxv4f32: ; CHECK: # %bb.0: ; CHECK-NEXT: vl1re16.v v10, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v8, v10 ; CHECK-NEXT: ret %y = load , * %x @@ -1138,9 +1138,9 @@ ; CHECK-LABEL: extload_nxv4f16_nxv4f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vl1re16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v8, v12 ; CHECK-NEXT: ret %y = load , * %x @@ -1152,7 +1152,7 @@ ; CHECK-LABEL: extload_nxv8f16_nxv8f32: ; CHECK: # %bb.0: ; CHECK-NEXT: vl2re16.v v12, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v8, v12 ; CHECK-NEXT: ret %y = load , * %x @@ -1164,9 +1164,9 @@ ; CHECK-LABEL: extload_nxv8f16_nxv8f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vl2re16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v16, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v8, v16 ; CHECK-NEXT: ret %y = load , * %x @@ -1178,7 +1178,7 @@ ; CHECK-LABEL: 
extload_nxv16f16_nxv16f32: ; CHECK: # %bb.0: ; CHECK-NEXT: vl4re16.v v16, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v8, v16 ; CHECK-NEXT: ret %y = load , * %x @@ -1189,7 +1189,7 @@ define void @truncstore_nxv1f32_nxv1f16( %x, * %z) { ; CHECK-LABEL: truncstore_nxv1f32_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v9, v8 ; CHECK-NEXT: vse16.v v9, (a0) ; CHECK-NEXT: ret @@ -1201,7 +1201,7 @@ define @extload_nxv1f32_nxv1f64(* %x) { ; CHECK-LABEL: extload_nxv1f32_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vfwcvt.f.f.v v8, v9 ; CHECK-NEXT: ret @@ -1213,7 +1213,7 @@ define void @truncstore_nxv2f32_nxv2f16( %x, * %z) { ; CHECK-LABEL: truncstore_nxv2f32_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v9, v8 ; CHECK-NEXT: vse16.v v9, (a0) ; CHECK-NEXT: ret @@ -1226,7 +1226,7 @@ ; CHECK-LABEL: extload_nxv2f32_nxv2f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vl1re32.v v10, (a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v8, v10 ; CHECK-NEXT: ret %y = load , * %x @@ -1237,7 +1237,7 @@ define void @truncstore_nxv4f32_nxv4f16( %x, * %z) { ; CHECK-LABEL: truncstore_nxv4f32_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v10, v8 ; CHECK-NEXT: vs1r.v v10, (a0) ; CHECK-NEXT: ret @@ -1250,7 +1250,7 @@ ; CHECK-LABEL: extload_nxv4f32_nxv4f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vl2re32.v v12, (a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v8, v12 ; CHECK-NEXT: ret %y = load , * %x @@ -1261,7 +1261,7 @@ define void @truncstore_nxv8f32_nxv8f16( %x, * %z) { ; CHECK-LABEL: truncstore_nxv8f32_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v12, v8 ; CHECK-NEXT: vs2r.v v12, (a0) ; CHECK-NEXT: ret @@ -1274,7 +1274,7 @@ ; CHECK-LABEL: extload_nxv8f32_nxv8f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vl4re32.v v16, (a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v8, v16 ; CHECK-NEXT: ret %y = load , * %x @@ -1285,7 +1285,7 @@ define void @truncstore_nxv16f32_nxv16f16( %x, * %z) { ; CHECK-LABEL: truncstore_nxv16f32_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v16, v8 ; CHECK-NEXT: vs4r.v v16, (a0) ; CHECK-NEXT: ret @@ -1297,9 +1297,9 @@ define void @truncstore_nxv1f64_nxv1f16( %x, * %z) { ; CHECK-LABEL: truncstore_nxv1f64_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.rod.f.f.w v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v8, v9 ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret @@ -1311,7 +1311,7 @@ define void @truncstore_nxv1f64_nxv1f32( %x, * %z) { ; CHECK-LABEL: 
truncstore_nxv1f64_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v9, v8 ; CHECK-NEXT: vse32.v v9, (a0) ; CHECK-NEXT: ret @@ -1323,9 +1323,9 @@ define void @truncstore_nxv2f64_nxv2f16( %x, * %z) { ; CHECK-LABEL: truncstore_nxv2f64_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.rod.f.f.w v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v8, v10 ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret @@ -1337,7 +1337,7 @@ define void @truncstore_nxv2f64_nxv2f32( %x, * %z) { ; CHECK-LABEL: truncstore_nxv2f64_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v10, v8 ; CHECK-NEXT: vs1r.v v10, (a0) ; CHECK-NEXT: ret @@ -1349,9 +1349,9 @@ define void @truncstore_nxv4f64_nxv4f16( %x, * %z) { ; CHECK-LABEL: truncstore_nxv4f64_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vfncvt.rod.f.f.w v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v8, v12 ; CHECK-NEXT: vs1r.v v8, (a0) ; CHECK-NEXT: ret @@ -1363,7 +1363,7 @@ define void @truncstore_nxv4f64_nxv4f32( %x, * %z) { ; CHECK-LABEL: truncstore_nxv4f64_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v12, v8 ; CHECK-NEXT: vs2r.v v12, (a0) ; CHECK-NEXT: ret @@ -1375,9 +1375,9 @@ define void @truncstore_nxv8f64_nxv8f16( %x, * %z) { ; CHECK-LABEL: truncstore_nxv8f64_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vfncvt.rod.f.f.w v16, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v8, v16 ; CHECK-NEXT: vs2r.v v8, (a0) ; CHECK-NEXT: ret @@ -1389,7 +1389,7 @@ define void @truncstore_nxv8f64_nxv8f32( %x, * %z) { ; CHECK-LABEL: truncstore_nxv8f64_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v16, v8 ; CHECK-NEXT: vs4r.v v16, (a0) ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll --- a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll @@ -196,7 +196,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: ret %c = call @llvm.vector.extract.nxv1i32.nxv16i32( %vec, i64 1) @@ -208,7 +208,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret %c = call @llvm.vector.extract.nxv1i32.nxv16i32( %vec, i64 3) @@ -220,7 +220,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu 
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v15, a0 ; CHECK-NEXT: ret %c = call @llvm.vector.extract.nxv1i32.nxv16i32( %vec, i64 15) @@ -258,7 +258,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: ret %c = call @llvm.vector.extract.nxv2i8.nxv32i8( %vec, i64 2) @@ -270,7 +270,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: ret %c = call @llvm.vector.extract.nxv2i8.nxv32i8( %vec, i64 4) @@ -284,7 +284,7 @@ ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: li a1, 6 ; CHECK-NEXT: mul a0, a0, a1 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: ret %c = call @llvm.vector.extract.nxv2i8.nxv32i8( %vec, i64 6) @@ -307,7 +307,7 @@ ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: li a1, 6 ; CHECK-NEXT: mul a0, a0, a1 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v10, a0 ; CHECK-NEXT: ret %c = call @llvm.vector.extract.nxv2i8.nxv32i8( %vec, i64 22) @@ -320,7 +320,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a1, a0, 3 ; CHECK-NEXT: sub a0, a0, a1 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: ret %c = call @llvm.vector.extract.nxv1i8.nxv8i8( %vec, i64 7) @@ -334,7 +334,7 @@ ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: slli a1, a0, 1 ; CHECK-NEXT: add a0, a1, a0 -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: ret %c = call @llvm.vector.extract.nxv1i8.nxv4i8( %vec, i64 3) @@ -355,7 +355,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: ret %c = call @llvm.vector.extract.nxv2f16.nxv16f16( %vec, i64 2) @@ -384,7 +384,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v0, a0 ; CHECK-NEXT: ret %c = call @llvm.vector.extract.nxv8i1( %mask, i64 8) @@ -402,14 +402,14 @@ define @extract_nxv64i1_nxv2i1_2( %mask) { ; CHECK-LABEL: extract_nxv64i1_nxv2i1_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret %c = call @llvm.vector.extract.nxv2i1( %mask, i64 2) @@ -427,14 +427,14 @@ define @extract_nxv4i1_nxv32i1_4( %x) { ; CHECK-LABEL: extract_nxv4i1_nxv32i1_4: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, 
zero, e8, m4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret %c = call @llvm.vector.extract.nxv4i1( %x, i64 4) @@ -454,7 +454,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v0, a0 ; CHECK-NEXT: ret %c = call @llvm.vector.extract.nxv16i1( %x, i64 16) @@ -478,13 +478,13 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v14, v10, a0 ; CHECK-NEXT: vslidedown.vx v12, v9, a0 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vslideup.vi v13, v14, 0 ; CHECK-NEXT: add a1, a0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; CHECK-NEXT: vslideup.vx v12, v10, a0 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll --- a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll @@ -7,7 +7,7 @@ define half @extractelt_nxv1f16_0( %v) { ; CHECK-LABEL: extractelt_nxv1f16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -17,7 +17,7 @@ define half @extractelt_nxv1f16_imm( %v) { ; CHECK-LABEL: extractelt_nxv1f16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -28,7 +28,7 @@ define half @extractelt_nxv1f16_idx( %v, i32 signext %idx) { ; CHECK-LABEL: extractelt_nxv1f16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -39,7 +39,7 @@ define half @extractelt_nxv2f16_0( %v) { ; CHECK-LABEL: extractelt_nxv2f16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -49,7 +49,7 @@ define half @extractelt_nxv2f16_imm( %v) { ; CHECK-LABEL: extractelt_nxv2f16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -60,7 +60,7 @@ define half @extractelt_nxv2f16_idx( %v, i32 signext %idx) { ; CHECK-LABEL: extractelt_nxv2f16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -71,7 +71,7 @@ define half @extractelt_nxv4f16_0( %v) { ; CHECK-LABEL: extractelt_nxv4f16_0: 
; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -81,7 +81,7 @@ define half @extractelt_nxv4f16_imm( %v) { ; CHECK-LABEL: extractelt_nxv4f16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -92,7 +92,7 @@ define half @extractelt_nxv4f16_idx( %v, i32 signext %idx) { ; CHECK-LABEL: extractelt_nxv4f16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -103,7 +103,7 @@ define half @extractelt_nxv8f16_0( %v) { ; CHECK-LABEL: extractelt_nxv8f16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m2, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -113,7 +113,7 @@ define half @extractelt_nxv8f16_imm( %v) { ; CHECK-LABEL: extractelt_nxv8f16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -124,7 +124,7 @@ define half @extractelt_nxv8f16_idx( %v, i32 signext %idx) { ; CHECK-LABEL: extractelt_nxv8f16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -135,7 +135,7 @@ define half @extractelt_nxv16f16_0( %v) { ; CHECK-LABEL: extractelt_nxv16f16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -145,7 +145,7 @@ define half @extractelt_nxv16f16_imm( %v) { ; CHECK-LABEL: extractelt_nxv16f16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -156,7 +156,7 @@ define half @extractelt_nxv16f16_idx( %v, i32 signext %idx) { ; CHECK-LABEL: extractelt_nxv16f16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -167,7 +167,7 @@ define half @extractelt_nxv32f16_0( %v) { ; CHECK-LABEL: extractelt_nxv32f16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e16, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m8, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -177,7 +177,7 @@ define half @extractelt_nxv32f16_imm( %v) { ; CHECK-LABEL: extractelt_nxv32f16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -188,7 +188,7 @@ define half @extractelt_nxv32f16_idx( %v, i32 signext %idx) { ; CHECK-LABEL: extractelt_nxv32f16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; 
CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -199,7 +199,7 @@ define float @extractelt_nxv1f32_0( %v) { ; CHECK-LABEL: extractelt_nxv1f32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -209,7 +209,7 @@ define float @extractelt_nxv1f32_imm( %v) { ; CHECK-LABEL: extractelt_nxv1f32_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -220,7 +220,7 @@ define float @extractelt_nxv1f32_idx( %v, i32 signext %idx) { ; CHECK-LABEL: extractelt_nxv1f32_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -231,7 +231,7 @@ define float @extractelt_nxv2f32_0( %v) { ; CHECK-LABEL: extractelt_nxv2f32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -241,7 +241,7 @@ define float @extractelt_nxv2f32_imm( %v) { ; CHECK-LABEL: extractelt_nxv2f32_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -252,7 +252,7 @@ define float @extractelt_nxv2f32_idx( %v, i32 signext %idx) { ; CHECK-LABEL: extractelt_nxv2f32_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -263,7 +263,7 @@ define float @extractelt_nxv4f32_0( %v) { ; CHECK-LABEL: extractelt_nxv4f32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -273,7 +273,7 @@ define float @extractelt_nxv4f32_imm( %v) { ; CHECK-LABEL: extractelt_nxv4f32_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -284,7 +284,7 @@ define float @extractelt_nxv4f32_idx( %v, i32 signext %idx) { ; CHECK-LABEL: extractelt_nxv4f32_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -295,7 +295,7 @@ define float @extractelt_nxv8f32_0( %v) { ; CHECK-LABEL: extractelt_nxv8f32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m4, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -305,7 +305,7 @@ define float @extractelt_nxv8f32_imm( %v) { ; CHECK-LABEL: extractelt_nxv8f32_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -316,7 +316,7 @@ define float @extractelt_nxv8f32_idx( %v, i32 signext %idx) { ; CHECK-LABEL: extractelt_nxv8f32_idx: ; 
CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -327,7 +327,7 @@ define float @extractelt_nxv16f32_0( %v) { ; CHECK-LABEL: extractelt_nxv16f32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -337,7 +337,7 @@ define float @extractelt_nxv16f32_imm( %v) { ; CHECK-LABEL: extractelt_nxv16f32_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -348,7 +348,7 @@ define float @extractelt_nxv16f32_idx( %v, i32 signext %idx) { ; CHECK-LABEL: extractelt_nxv16f32_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -359,7 +359,7 @@ define double @extractelt_nxv1f64_0( %v) { ; CHECK-LABEL: extractelt_nxv1f64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -369,7 +369,7 @@ define double @extractelt_nxv1f64_imm( %v) { ; CHECK-LABEL: extractelt_nxv1f64_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -380,7 +380,7 @@ define double @extractelt_nxv1f64_idx( %v, i32 signext %idx) { ; CHECK-LABEL: extractelt_nxv1f64_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -391,7 +391,7 @@ define double @extractelt_nxv2f64_0( %v) { ; CHECK-LABEL: extractelt_nxv2f64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m2, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -401,7 +401,7 @@ define double @extractelt_nxv2f64_imm( %v) { ; CHECK-LABEL: extractelt_nxv2f64_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -412,7 +412,7 @@ define double @extractelt_nxv2f64_idx( %v, i32 signext %idx) { ; CHECK-LABEL: extractelt_nxv2f64_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -423,7 +423,7 @@ define double @extractelt_nxv4f64_0( %v) { ; CHECK-LABEL: extractelt_nxv4f64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m4, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -433,7 +433,7 @@ define double @extractelt_nxv4f64_imm( %v) { ; CHECK-LABEL: extractelt_nxv4f64_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; 
CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -444,7 +444,7 @@ define double @extractelt_nxv4f64_idx( %v, i32 signext %idx) { ; CHECK-LABEL: extractelt_nxv4f64_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -455,7 +455,7 @@ define double @extractelt_nxv8f64_0( %v) { ; CHECK-LABEL: extractelt_nxv8f64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m8, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -465,7 +465,7 @@ define double @extractelt_nxv8f64_imm( %v) { ; CHECK-LABEL: extractelt_nxv8f64_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -476,7 +476,7 @@ define double @extractelt_nxv8f64_idx( %v, i32 signext %idx) { ; CHECK-LABEL: extractelt_nxv8f64_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -488,7 +488,7 @@ ; CHECK-LABEL: store_extractelt_nxv8f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re64.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 1 ; CHECK-NEXT: vse64.v v8, (a1) ; CHECK-NEXT: ret @@ -502,7 +502,7 @@ ; CHECK-LABEL: store_vfmv_f_s_nxv8f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re64.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, ma ; CHECK-NEXT: vse64.v v8, (a1) ; CHECK-NEXT: ret %a = load , * %x @@ -518,7 +518,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI47_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI47_0)(a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s ft1, v8 ; CHECK-NEXT: fadd.s fa0, ft1, ft0 @@ -535,7 +535,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI48_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI48_0)(a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 1 ; CHECK-NEXT: vfmv.f.s ft1, v8 ; CHECK-NEXT: fsub.s fa0, ft0, ft1 @@ -552,7 +552,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI49_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI49_0)(a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 3 ; CHECK-NEXT: vfmv.f.s ft1, v8 ; CHECK-NEXT: fmul.s fa0, ft1, ft0 @@ -569,7 +569,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI50_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI50_0)(a0) -; CHECK-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; CHECK-NEXT: vfmv.f.s ft1, v8 ; CHECK-NEXT: fdiv.s fa0, ft1, ft0 ; CHECK-NEXT: ret @@ -583,7 +583,7 @@ define double @extractelt_nxv16f64_0( %v) { ; CHECK-LABEL: extractelt_nxv16f64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m8, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -620,7 +620,7 @@ define double @extractelt_nxv16f64_imm( %v) { ; CHECK-LABEL: extractelt_nxv16f64_imm: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll --- a/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll +++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll @@ -5,12 +5,12 @@ define i1 @extractelt_nxv1i1(* %x, i64 %idx) nounwind { ; CHECK-LABEL: extractelt_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmseq.vi v0, v8, 0 ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -23,12 +23,12 @@ define i1 @extractelt_nxv2i1(* %x, i64 %idx) nounwind { ; CHECK-LABEL: extractelt_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmseq.vi v0, v8, 0 ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -41,12 +41,12 @@ define i1 @extractelt_nxv4i1(* %x, i64 %idx) nounwind { ; CHECK-LABEL: extractelt_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmseq.vi v0, v8, 0 ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -60,11 +60,11 @@ ; CHECK-LABEL: extractelt_nxv8i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vl1r.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 0 ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -78,11 +78,11 @@ ; CHECK-LABEL: extractelt_nxv16i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vl2r.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 0 ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -96,11 +96,11 @@ ; CHECK-LABEL: extractelt_nxv32i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vl4r.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 0 ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 1, e8, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -114,11 +114,11 @@ ; CHECK-LABEL: extractelt_nxv64i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8r.v v8, (a0) 
-; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 0 ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -150,7 +150,7 @@ ; CHECK-NEXT: vl8r.v v16, (a4) ; CHECK-NEXT: vl8r.v v24, (a0) ; CHECK-NEXT: add a0, a3, a1 -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vmseq.vi v8, v16, 0 ; CHECK-NEXT: vmseq.vi v0, v24, 0 ; CHECK-NEXT: vmv.v.i v16, 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll @@ -8,7 +8,7 @@ define signext i8 @extractelt_nxv1i8_0( %v) { ; CHECK-LABEL: extractelt_nxv1i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, mf8, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -18,7 +18,7 @@ define signext i8 @extractelt_nxv1i8_imm( %v) { ; CHECK-LABEL: extractelt_nxv1i8_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -29,7 +29,7 @@ define signext i8 @extractelt_nxv1i8_idx( %v, i32 %idx) { ; CHECK-LABEL: extractelt_nxv1i8_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -40,7 +40,7 @@ define signext i8 @extractelt_nxv2i8_0( %v) { ; CHECK-LABEL: extractelt_nxv2i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -50,7 +50,7 @@ define signext i8 @extractelt_nxv2i8_imm( %v) { ; CHECK-LABEL: extractelt_nxv2i8_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -61,7 +61,7 @@ define signext i8 @extractelt_nxv2i8_idx( %v, i32 %idx) { ; CHECK-LABEL: extractelt_nxv2i8_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -72,7 +72,7 @@ define signext i8 @extractelt_nxv4i8_0( %v) { ; CHECK-LABEL: extractelt_nxv4i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, mf2, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -82,7 +82,7 @@ define signext i8 @extractelt_nxv4i8_imm( %v) { ; CHECK-LABEL: extractelt_nxv4i8_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -93,7 +93,7 @@ define signext i8 @extractelt_nxv4i8_idx( %v, i32 %idx) { ; CHECK-LABEL: extractelt_nxv4i8_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, 
mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -104,7 +104,7 @@ define signext i8 @extractelt_nxv8i8_0( %v) { ; CHECK-LABEL: extractelt_nxv8i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -114,7 +114,7 @@ define signext i8 @extractelt_nxv8i8_imm( %v) { ; CHECK-LABEL: extractelt_nxv8i8_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -125,7 +125,7 @@ define signext i8 @extractelt_nxv8i8_idx( %v, i32 %idx) { ; CHECK-LABEL: extractelt_nxv8i8_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -136,7 +136,7 @@ define signext i8 @extractelt_nxv16i8_0( %v) { ; CHECK-LABEL: extractelt_nxv16i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e8, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, m2, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -146,7 +146,7 @@ define signext i8 @extractelt_nxv16i8_imm( %v) { ; CHECK-LABEL: extractelt_nxv16i8_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -157,7 +157,7 @@ define signext i8 @extractelt_nxv16i8_idx( %v, i32 %idx) { ; CHECK-LABEL: extractelt_nxv16i8_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -168,7 +168,7 @@ define signext i8 @extractelt_nxv32i8_0( %v) { ; CHECK-LABEL: extractelt_nxv32i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e8, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, m4, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -178,7 +178,7 @@ define signext i8 @extractelt_nxv32i8_imm( %v) { ; CHECK-LABEL: extractelt_nxv32i8_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -189,7 +189,7 @@ define signext i8 @extractelt_nxv32i8_idx( %v, i32 %idx) { ; CHECK-LABEL: extractelt_nxv32i8_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -200,7 +200,7 @@ define signext i8 @extractelt_nxv64i8_0( %v) { ; CHECK-LABEL: extractelt_nxv64i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -210,7 +210,7 @@ define signext i8 @extractelt_nxv64i8_imm( %v) { ; CHECK-LABEL: extractelt_nxv64i8_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -221,7 +221,7 @@ define signext i8 @extractelt_nxv64i8_idx( %v, i32 %idx) { ; CHECK-LABEL: 
extractelt_nxv64i8_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -232,7 +232,7 @@ define signext i16 @extractelt_nxv1i16_0( %v) { ; CHECK-LABEL: extractelt_nxv1i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -242,7 +242,7 @@ define signext i16 @extractelt_nxv1i16_imm( %v) { ; CHECK-LABEL: extractelt_nxv1i16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -253,7 +253,7 @@ define signext i16 @extractelt_nxv1i16_idx( %v, i32 %idx) { ; CHECK-LABEL: extractelt_nxv1i16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -264,7 +264,7 @@ define signext i16 @extractelt_nxv2i16_0( %v) { ; CHECK-LABEL: extractelt_nxv2i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -274,7 +274,7 @@ define signext i16 @extractelt_nxv2i16_imm( %v) { ; CHECK-LABEL: extractelt_nxv2i16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -285,7 +285,7 @@ define signext i16 @extractelt_nxv2i16_idx( %v, i32 %idx) { ; CHECK-LABEL: extractelt_nxv2i16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -296,7 +296,7 @@ define signext i16 @extractelt_nxv4i16_0( %v) { ; CHECK-LABEL: extractelt_nxv4i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -306,7 +306,7 @@ define signext i16 @extractelt_nxv4i16_imm( %v) { ; CHECK-LABEL: extractelt_nxv4i16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -317,7 +317,7 @@ define signext i16 @extractelt_nxv4i16_idx( %v, i32 %idx) { ; CHECK-LABEL: extractelt_nxv4i16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -328,7 +328,7 @@ define signext i16 @extractelt_nxv8i16_0( %v) { ; CHECK-LABEL: extractelt_nxv8i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m2, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -338,7 +338,7 @@ define signext i16 @extractelt_nxv8i16_imm( %v) { ; CHECK-LABEL: extractelt_nxv8i16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, ma ; 
CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -349,7 +349,7 @@ define signext i16 @extractelt_nxv8i16_idx( %v, i32 %idx) { ; CHECK-LABEL: extractelt_nxv8i16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -360,7 +360,7 @@ define signext i16 @extractelt_nxv16i16_0( %v) { ; CHECK-LABEL: extractelt_nxv16i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -370,7 +370,7 @@ define signext i16 @extractelt_nxv16i16_imm( %v) { ; CHECK-LABEL: extractelt_nxv16i16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -381,7 +381,7 @@ define signext i16 @extractelt_nxv16i16_idx( %v, i32 %idx) { ; CHECK-LABEL: extractelt_nxv16i16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -392,7 +392,7 @@ define signext i16 @extractelt_nxv32i16_0( %v) { ; CHECK-LABEL: extractelt_nxv32i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e16, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m8, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -402,7 +402,7 @@ define signext i16 @extractelt_nxv32i16_imm( %v) { ; CHECK-LABEL: extractelt_nxv32i16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -413,7 +413,7 @@ define signext i16 @extractelt_nxv32i16_idx( %v, i32 %idx) { ; CHECK-LABEL: extractelt_nxv32i16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -424,7 +424,7 @@ define i32 @extractelt_nxv1i32_0( %v) { ; CHECK-LABEL: extractelt_nxv1i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -434,7 +434,7 @@ define i32 @extractelt_nxv1i32_imm( %v) { ; CHECK-LABEL: extractelt_nxv1i32_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -445,7 +445,7 @@ define i32 @extractelt_nxv1i32_idx( %v, i32 %idx) { ; CHECK-LABEL: extractelt_nxv1i32_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -456,7 +456,7 @@ define i32 @extractelt_nxv2i32_0( %v) { ; CHECK-LABEL: extractelt_nxv2i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -466,7 +466,7 @@ define i32 @extractelt_nxv2i32_imm( %v) { ; CHECK-LABEL: 
extractelt_nxv2i32_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -477,7 +477,7 @@ define i32 @extractelt_nxv2i32_idx( %v, i32 %idx) { ; CHECK-LABEL: extractelt_nxv2i32_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -488,7 +488,7 @@ define i32 @extractelt_nxv4i32_0( %v) { ; CHECK-LABEL: extractelt_nxv4i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -498,7 +498,7 @@ define i32 @extractelt_nxv4i32_imm( %v) { ; CHECK-LABEL: extractelt_nxv4i32_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -509,7 +509,7 @@ define i32 @extractelt_nxv4i32_idx( %v, i32 %idx) { ; CHECK-LABEL: extractelt_nxv4i32_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -520,7 +520,7 @@ define i32 @extractelt_nxv8i32_0( %v) { ; CHECK-LABEL: extractelt_nxv8i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m4, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -530,7 +530,7 @@ define i32 @extractelt_nxv8i32_imm( %v) { ; CHECK-LABEL: extractelt_nxv8i32_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -541,7 +541,7 @@ define i32 @extractelt_nxv8i32_idx( %v, i32 %idx) { ; CHECK-LABEL: extractelt_nxv8i32_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -552,7 +552,7 @@ define i32 @extractelt_nxv16i32_0( %v) { ; CHECK-LABEL: extractelt_nxv16i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -562,7 +562,7 @@ define i32 @extractelt_nxv16i32_imm( %v) { ; CHECK-LABEL: extractelt_nxv16i32_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -573,7 +573,7 @@ define i32 @extractelt_nxv16i32_idx( %v, i32 %idx) { ; CHECK-LABEL: extractelt_nxv16i32_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -585,7 +585,7 @@ ; CHECK-LABEL: extractelt_nxv1i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vsrl.vx v9, v8, a0 ; CHECK-NEXT: vmv.x.s a1, v9 ; CHECK-NEXT: vmv.x.s a0, v8 @@ 
-597,7 +597,7 @@ define i64 @extractelt_nxv1i64_imm( %v) { ; CHECK-LABEL: extractelt_nxv1i64_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: li a1, 32 @@ -611,7 +611,7 @@ define i64 @extractelt_nxv1i64_idx( %v, i32 %idx) { ; CHECK-LABEL: extractelt_nxv1i64_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: li a1, 32 @@ -626,7 +626,7 @@ ; CHECK-LABEL: extractelt_nxv2i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, ma ; CHECK-NEXT: vsrl.vx v10, v8, a0 ; CHECK-NEXT: vmv.x.s a1, v10 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -638,7 +638,7 @@ define i64 @extractelt_nxv2i64_imm( %v) { ; CHECK-LABEL: extractelt_nxv2i64_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: li a1, 32 @@ -652,7 +652,7 @@ define i64 @extractelt_nxv2i64_idx( %v, i32 %idx) { ; CHECK-LABEL: extractelt_nxv2i64_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: li a1, 32 @@ -667,7 +667,7 @@ ; CHECK-LABEL: extractelt_nxv4i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, ma ; CHECK-NEXT: vsrl.vx v12, v8, a0 ; CHECK-NEXT: vmv.x.s a1, v12 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -679,7 +679,7 @@ define i64 @extractelt_nxv4i64_imm( %v) { ; CHECK-LABEL: extractelt_nxv4i64_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: li a1, 32 @@ -693,7 +693,7 @@ define i64 @extractelt_nxv4i64_idx( %v, i32 %idx) { ; CHECK-LABEL: extractelt_nxv4i64_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: li a1, 32 @@ -708,7 +708,7 @@ ; CHECK-LABEL: extractelt_nxv8i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, ma ; CHECK-NEXT: vsrl.vx v16, v8, a0 ; CHECK-NEXT: vmv.x.s a1, v16 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -720,7 +720,7 @@ define i64 @extractelt_nxv8i64_imm( %v) { ; CHECK-LABEL: extractelt_nxv8i64_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: li a1, 32 @@ -734,7 +734,7 @@ define i64 @extractelt_nxv8i64_idx( %v, i32 %idx) { ; CHECK-LABEL: extractelt_nxv8i64_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: li a1, 32 @@ -748,7 +748,7 @@ define i32 @extractelt_add_nxv4i32_splat( %x) { ; CHECK-LABEL: extractelt_add_nxv4i32_splat: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, mu 
+; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: addi a0, a0, 3 @@ -763,7 +763,7 @@ define i32 @extractelt_sub_nxv4i32_splat( %x) { ; CHECK-LABEL: extractelt_sub_nxv4i32_splat: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 1 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: li a1, 3 @@ -780,16 +780,16 @@ ; RV32NOM-LABEL: extractelt_mul_nxv4i32_splat: ; RV32NOM: # %bb.0: ; RV32NOM-NEXT: li a0, 3 -; RV32NOM-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; RV32NOM-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; RV32NOM-NEXT: vmul.vx v8, v8, a0 -; RV32NOM-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32NOM-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32NOM-NEXT: vslidedown.vi v8, v8, 3 ; RV32NOM-NEXT: vmv.x.s a0, v8 ; RV32NOM-NEXT: ret ; ; RV32M-LABEL: extractelt_mul_nxv4i32_splat: ; RV32M: # %bb.0: -; RV32M-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32M-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32M-NEXT: vslidedown.vi v8, v8, 3 ; RV32M-NEXT: vmv.x.s a0, v8 ; RV32M-NEXT: slli a1, a0, 1 @@ -807,7 +807,7 @@ ; RV32NOM: # %bb.0: ; RV32NOM-NEXT: lui a0, 349525 ; RV32NOM-NEXT: addi a0, a0, 1366 -; RV32NOM-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; RV32NOM-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; RV32NOM-NEXT: vmulh.vx v8, v8, a0 ; RV32NOM-NEXT: vsrl.vi v10, v8, 31 ; RV32NOM-NEXT: vadd.vv v8, v8, v10 @@ -816,7 +816,7 @@ ; ; RV32M-LABEL: extractelt_sdiv_nxv4i32_splat: ; RV32M: # %bb.0: -; RV32M-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32M-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32M-NEXT: vmv.x.s a0, v8 ; RV32M-NEXT: lui a1, 349525 ; RV32M-NEXT: addi a1, a1, 1366 @@ -836,7 +836,7 @@ ; RV32NOM: # %bb.0: ; RV32NOM-NEXT: lui a0, 349525 ; RV32NOM-NEXT: addi a0, a0, 1366 -; RV32NOM-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; RV32NOM-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; RV32NOM-NEXT: vmulh.vx v8, v8, a0 ; RV32NOM-NEXT: vsrl.vi v10, v8, 31 ; RV32NOM-NEXT: vadd.vv v8, v8, v10 @@ -845,7 +845,7 @@ ; ; RV32M-LABEL: extractelt_udiv_nxv4i32_splat: ; RV32M: # %bb.0: -; RV32M-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32M-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32M-NEXT: vmv.x.s a0, v8 ; RV32M-NEXT: lui a1, 349525 ; RV32M-NEXT: addi a1, a1, 1366 @@ -863,7 +863,7 @@ define i32 @extractelt_nxv32i32_0( %v) { ; CHECK-LABEL: extractelt_nxv32i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -900,7 +900,7 @@ define i32 @extractelt_nxv32i32_imm( %v) { ; CHECK-LABEL: extractelt_nxv32i32_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -944,7 +944,7 @@ define i64 @extractelt_nxv16i64_0( %v) { ; CHECK-LABEL: extractelt_nxv16i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v16, v8, 1 ; CHECK-NEXT: vmv.x.s a1, v16 ; CHECK-NEXT: vmv.x.s a0, v8 diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll @@ -5,7 +5,7 @@ define signext i8 @extractelt_nxv1i8_0( %v) { ; 
CHECK-LABEL: extractelt_nxv1i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, mf8, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -15,7 +15,7 @@ define signext i8 @extractelt_nxv1i8_imm( %v) { ; CHECK-LABEL: extractelt_nxv1i8_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -26,7 +26,7 @@ define signext i8 @extractelt_nxv1i8_idx( %v, i32 signext %idx) { ; CHECK-LABEL: extractelt_nxv1i8_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -37,7 +37,7 @@ define signext i8 @extractelt_nxv2i8_0( %v) { ; CHECK-LABEL: extractelt_nxv2i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -47,7 +47,7 @@ define signext i8 @extractelt_nxv2i8_imm( %v) { ; CHECK-LABEL: extractelt_nxv2i8_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -58,7 +58,7 @@ define signext i8 @extractelt_nxv2i8_idx( %v, i32 signext %idx) { ; CHECK-LABEL: extractelt_nxv2i8_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -69,7 +69,7 @@ define signext i8 @extractelt_nxv4i8_0( %v) { ; CHECK-LABEL: extractelt_nxv4i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, mf2, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -79,7 +79,7 @@ define signext i8 @extractelt_nxv4i8_imm( %v) { ; CHECK-LABEL: extractelt_nxv4i8_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -90,7 +90,7 @@ define signext i8 @extractelt_nxv4i8_idx( %v, i32 signext %idx) { ; CHECK-LABEL: extractelt_nxv4i8_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -101,7 +101,7 @@ define signext i8 @extractelt_nxv8i8_0( %v) { ; CHECK-LABEL: extractelt_nxv8i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -111,7 +111,7 @@ define signext i8 @extractelt_nxv8i8_imm( %v) { ; CHECK-LABEL: extractelt_nxv8i8_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -122,7 +122,7 @@ define signext i8 @extractelt_nxv8i8_idx( %v, i32 signext %idx) { ; CHECK-LABEL: extractelt_nxv8i8_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: 
vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -133,7 +133,7 @@ define signext i8 @extractelt_nxv16i8_0( %v) { ; CHECK-LABEL: extractelt_nxv16i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e8, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, m2, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -143,7 +143,7 @@ define signext i8 @extractelt_nxv16i8_imm( %v) { ; CHECK-LABEL: extractelt_nxv16i8_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -154,7 +154,7 @@ define signext i8 @extractelt_nxv16i8_idx( %v, i32 signext %idx) { ; CHECK-LABEL: extractelt_nxv16i8_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -165,7 +165,7 @@ define signext i8 @extractelt_nxv32i8_0( %v) { ; CHECK-LABEL: extractelt_nxv32i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e8, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, m4, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -175,7 +175,7 @@ define signext i8 @extractelt_nxv32i8_imm( %v) { ; CHECK-LABEL: extractelt_nxv32i8_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -186,7 +186,7 @@ define signext i8 @extractelt_nxv32i8_idx( %v, i32 signext %idx) { ; CHECK-LABEL: extractelt_nxv32i8_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -197,7 +197,7 @@ define signext i8 @extractelt_nxv64i8_0( %v) { ; CHECK-LABEL: extractelt_nxv64i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -207,7 +207,7 @@ define signext i8 @extractelt_nxv64i8_imm( %v) { ; CHECK-LABEL: extractelt_nxv64i8_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -218,7 +218,7 @@ define signext i8 @extractelt_nxv64i8_idx( %v, i32 signext %idx) { ; CHECK-LABEL: extractelt_nxv64i8_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -229,7 +229,7 @@ define signext i16 @extractelt_nxv1i16_0( %v) { ; CHECK-LABEL: extractelt_nxv1i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -239,7 +239,7 @@ define signext i16 @extractelt_nxv1i16_imm( %v) { ; CHECK-LABEL: extractelt_nxv1i16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -250,7 +250,7 @@ define signext i16 @extractelt_nxv1i16_idx( %v, i32 signext %idx) 
{ ; CHECK-LABEL: extractelt_nxv1i16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -261,7 +261,7 @@ define signext i16 @extractelt_nxv2i16_0( %v) { ; CHECK-LABEL: extractelt_nxv2i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -271,7 +271,7 @@ define signext i16 @extractelt_nxv2i16_imm( %v) { ; CHECK-LABEL: extractelt_nxv2i16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -282,7 +282,7 @@ define signext i16 @extractelt_nxv2i16_idx( %v, i32 signext %idx) { ; CHECK-LABEL: extractelt_nxv2i16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -293,7 +293,7 @@ define signext i16 @extractelt_nxv4i16_0( %v) { ; CHECK-LABEL: extractelt_nxv4i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -303,7 +303,7 @@ define signext i16 @extractelt_nxv4i16_imm( %v) { ; CHECK-LABEL: extractelt_nxv4i16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -314,7 +314,7 @@ define signext i16 @extractelt_nxv4i16_idx( %v, i32 signext %idx) { ; CHECK-LABEL: extractelt_nxv4i16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -325,7 +325,7 @@ define signext i16 @extractelt_nxv8i16_0( %v) { ; CHECK-LABEL: extractelt_nxv8i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m2, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -335,7 +335,7 @@ define signext i16 @extractelt_nxv8i16_imm( %v) { ; CHECK-LABEL: extractelt_nxv8i16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -346,7 +346,7 @@ define signext i16 @extractelt_nxv8i16_idx( %v, i32 signext %idx) { ; CHECK-LABEL: extractelt_nxv8i16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -357,7 +357,7 @@ define signext i16 @extractelt_nxv16i16_0( %v) { ; CHECK-LABEL: extractelt_nxv16i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -367,7 +367,7 @@ define signext i16 @extractelt_nxv16i16_imm( %v) { ; CHECK-LABEL: extractelt_nxv16i16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, mu +; CHECK-NEXT: 
vsetivli zero, 1, e16, m4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -378,7 +378,7 @@ define signext i16 @extractelt_nxv16i16_idx( %v, i32 signext %idx) { ; CHECK-LABEL: extractelt_nxv16i16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -389,7 +389,7 @@ define signext i16 @extractelt_nxv32i16_0( %v) { ; CHECK-LABEL: extractelt_nxv32i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e16, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m8, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -399,7 +399,7 @@ define signext i16 @extractelt_nxv32i16_imm( %v) { ; CHECK-LABEL: extractelt_nxv32i16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -410,7 +410,7 @@ define signext i16 @extractelt_nxv32i16_idx( %v, i32 signext %idx) { ; CHECK-LABEL: extractelt_nxv32i16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -421,7 +421,7 @@ define signext i32 @extractelt_nxv1i32_0( %v) { ; CHECK-LABEL: extractelt_nxv1i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -431,7 +431,7 @@ define signext i32 @extractelt_nxv1i32_imm( %v) { ; CHECK-LABEL: extractelt_nxv1i32_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -442,7 +442,7 @@ define signext i32 @extractelt_nxv1i32_idx( %v, i32 signext %idx) { ; CHECK-LABEL: extractelt_nxv1i32_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -453,7 +453,7 @@ define signext i32 @extractelt_nxv2i32_0( %v) { ; CHECK-LABEL: extractelt_nxv2i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -463,7 +463,7 @@ define signext i32 @extractelt_nxv2i32_imm( %v) { ; CHECK-LABEL: extractelt_nxv2i32_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -474,7 +474,7 @@ define signext i32 @extractelt_nxv2i32_idx( %v, i32 signext %idx) { ; CHECK-LABEL: extractelt_nxv2i32_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -485,7 +485,7 @@ define signext i32 @extractelt_nxv4i32_0( %v) { ; CHECK-LABEL: extractelt_nxv4i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = 
extractelement %v, i32 0 @@ -495,7 +495,7 @@ define signext i32 @extractelt_nxv4i32_imm( %v) { ; CHECK-LABEL: extractelt_nxv4i32_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -506,7 +506,7 @@ define signext i32 @extractelt_nxv4i32_idx( %v, i32 signext %idx) { ; CHECK-LABEL: extractelt_nxv4i32_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -517,7 +517,7 @@ define signext i32 @extractelt_nxv8i32_0( %v) { ; CHECK-LABEL: extractelt_nxv8i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m4, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -527,7 +527,7 @@ define signext i32 @extractelt_nxv8i32_imm( %v) { ; CHECK-LABEL: extractelt_nxv8i32_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -538,7 +538,7 @@ define signext i32 @extractelt_nxv8i32_idx( %v, i32 signext %idx) { ; CHECK-LABEL: extractelt_nxv8i32_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -549,7 +549,7 @@ define signext i32 @extractelt_nxv16i32_0( %v) { ; CHECK-LABEL: extractelt_nxv16i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -559,7 +559,7 @@ define signext i32 @extractelt_nxv16i32_imm( %v) { ; CHECK-LABEL: extractelt_nxv16i32_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -570,7 +570,7 @@ define signext i32 @extractelt_nxv16i32_idx( %v, i32 signext %idx) { ; CHECK-LABEL: extractelt_nxv16i32_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -581,7 +581,7 @@ define i64 @extractelt_nxv1i64_0( %v) { ; CHECK-LABEL: extractelt_nxv1i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -591,7 +591,7 @@ define i64 @extractelt_nxv1i64_imm( %v) { ; CHECK-LABEL: extractelt_nxv1i64_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -602,7 +602,7 @@ define i64 @extractelt_nxv1i64_idx( %v, i32 signext %idx) { ; CHECK-LABEL: extractelt_nxv1i64_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -613,7 +613,7 @@ define i64 @extractelt_nxv2i64_0( %v) { ; CHECK-LABEL: extractelt_nxv2i64_0: 
; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m2, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -623,7 +623,7 @@ define i64 @extractelt_nxv2i64_imm( %v) { ; CHECK-LABEL: extractelt_nxv2i64_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -634,7 +634,7 @@ define i64 @extractelt_nxv2i64_idx( %v, i32 signext %idx) { ; CHECK-LABEL: extractelt_nxv2i64_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -645,7 +645,7 @@ define i64 @extractelt_nxv4i64_0( %v) { ; CHECK-LABEL: extractelt_nxv4i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m4, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -655,7 +655,7 @@ define i64 @extractelt_nxv4i64_imm( %v) { ; CHECK-LABEL: extractelt_nxv4i64_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -666,7 +666,7 @@ define i64 @extractelt_nxv4i64_idx( %v, i32 signext %idx) { ; CHECK-LABEL: extractelt_nxv4i64_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -677,7 +677,7 @@ define i64 @extractelt_nxv8i64_0( %v) { ; CHECK-LABEL: extractelt_nxv8i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m8, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -687,7 +687,7 @@ define i64 @extractelt_nxv8i64_imm( %v) { ; CHECK-LABEL: extractelt_nxv8i64_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -698,7 +698,7 @@ define i64 @extractelt_nxv8i64_idx( %v, i32 signext %idx) { ; CHECK-LABEL: extractelt_nxv8i64_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -709,9 +709,9 @@ define i32 @extractelt_add_nxv4i32_splat( %x) { ; CHECK-LABEL: extractelt_add_nxv4i32_splat: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 3 -; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -725,9 +725,9 @@ define i32 @extractelt_sub_nxv4i32_splat( %x) { ; CHECK-LABEL: extractelt_sub_nxv4i32_splat: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 3 -; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 1 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -742,9 +742,9 @@ ; 
CHECK-LABEL: extractelt_mul_nxv4i32_splat: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 3 -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 -; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 3 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -760,7 +760,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, 349525 ; CHECK-NEXT: addiw a0, a0, 1366 -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vmulh.vx v8, v8, a0 ; CHECK-NEXT: vsrl.vi v10, v8, 31 ; CHECK-NEXT: vadd.vv v8, v8, v10 @@ -778,7 +778,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, 349525 ; CHECK-NEXT: addiw a0, a0, 1366 -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vmulh.vx v8, v8, a0 ; CHECK-NEXT: vsrl.vi v10, v8, 31 ; CHECK-NEXT: vadd.vv v8, v8, v10 @@ -794,7 +794,7 @@ define i64 @extractelt_nxv16i64_0( %v) { ; CHECK-LABEL: extractelt_nxv16i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m8, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %r = extractelement %v, i32 0 @@ -831,7 +831,7 @@ define i64 @extractelt_nxv16i64_imm( %v) { ; CHECK-LABEL: extractelt_nxv16i64_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fpext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fpext-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fpext-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fpext-vp.ll @@ -18,7 +18,7 @@ define <2 x float> @vfpext_v2f16_v2f32_unmasked(<2 x half> %a, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_v2f16_v2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -43,9 +43,9 @@ define <2 x double> @vfpext_v2f16_v2f64_unmasked(<2 x half> %a, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_v2f16_v2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v8, v9 ; CHECK-NEXT: ret %v = call <2 x double> @llvm.vp.fpext.v2f64.v2f16(<2 x half> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl) @@ -68,7 +68,7 @@ define <2 x double> @vfpext_v2f32_v2f64_unmasked(<2 x float> %a, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_v2f32_v2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -96,14 +96,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v1, v0 ; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: addi a2, a0, -16 ; CHECK-NEXT: vslidedown.vi v0, v0, 2 ; CHECK-NEXT: bltu a0, a2, .LBB7_2 ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a1, a2 ; CHECK-NEXT: .LBB7_2: -; CHECK-NEXT: 
vsetivli zero, 16, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v24, v8, 16 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: li a1, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fptrunc-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fptrunc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fptrunc-vp.ll @@ -18,7 +18,7 @@ define <2 x half> @vfptrunc_v2f16_v2f32_unmasked(<2 x float> %a, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_v2f16_v2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -43,9 +43,9 @@ define <2 x half> @vfptrunc_v2f16_v2f64_unmasked(<2 x double> %a, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_v2f16_v2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.rod.f.f.w v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v8, v9 ; CHECK-NEXT: ret %v = call <2 x half> @llvm.vp.fptrunc.v2f16.v2f64(<2 x double> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl) @@ -68,7 +68,7 @@ define <2 x float> @vfptrunc_v2f32_v2f64_unmasked(<2 x double> %a, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_v2f32_v2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -103,7 +103,7 @@ ; CHECK-NEXT: addi a1, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: addi a2, a0, -16 ; CHECK-NEXT: vslidedown.vi v0, v0, 2 ; CHECK-NEXT: bltu a0, a2, .LBB7_2 @@ -123,7 +123,7 @@ ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vfncvt.f.f.w v16, v24, v0.t ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vslideup.vi v16, v8, 16 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: csrr a0, vlenb diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-segN-load.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-segN-load.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-segN-load.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-segN-load.ll @@ -5,7 +5,7 @@ define <8 x i8> @load_factor2(<16 x i8>* %ptr) { ; CHECK-LABEL: load_factor2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vlseg2e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -19,7 +19,7 @@ define <8 x i8> @load_factor3(<24 x i8>* %ptr) { ; CHECK-LABEL: load_factor3: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vlseg3e8.v v6, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v6_v7_v8 ; CHECK-NEXT: ret @@ -34,7 +34,7 @@ define <8 x i8> @load_factor4(<32 x i8>* %ptr) { ; CHECK-LABEL: load_factor4: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 
8, e8, mf2, ta, ma ; CHECK-NEXT: vlseg4e8.v v5, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v5_v6_v7_v8 ; CHECK-NEXT: ret @@ -50,7 +50,7 @@ define <8 x i8> @load_factor5(<40 x i8>* %ptr) { ; CHECK-LABEL: load_factor5: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vlseg5e8.v v4, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v4_v5_v6_v7_v8 ; CHECK-NEXT: ret @@ -67,7 +67,7 @@ define <8 x i8> @load_factor6(<48 x i8>* %ptr) { ; CHECK-LABEL: load_factor6: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vlseg6e8.v v3, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v3_v4_v5_v6_v7_v8 ; CHECK-NEXT: ret @@ -85,7 +85,7 @@ define <8 x i8> @load_factor7(<56 x i8>* %ptr) { ; CHECK-LABEL: load_factor7: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vlseg7e8.v v2, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v2_v3_v4_v5_v6_v7_v8 ; CHECK-NEXT: ret @@ -104,7 +104,7 @@ define <8 x i8> @load_factor8(<64 x i8>* %ptr) { ; CHECK-LABEL: load_factor8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vlseg8e8.v v1, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v1_v2_v3_v4_v5_v6_v7_v8 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-reverse.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-reverse.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-reverse.ll @@ -13,18 +13,18 @@ define <2 x i1> @reverse_v2i1(<2 x i1> %a) { ; CHECK-LABEL: reverse_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vslidedown.vi v9, v8, 1 ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: vmv.x.s a1, v8 -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v8, a1 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -37,11 +37,11 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: addi a0, sp, 15 -; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: vslidedown.vi v9, v8, 1 ; CHECK-NEXT: addi a0, sp, 14 @@ -52,7 +52,7 @@ ; CHECK-NEXT: vslidedown.vi v8, v8, 3 ; CHECK-NEXT: addi a0, sp, 12 ; CHECK-NEXT: vse8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -67,7 +67,7 @@ ; RV32-BITS-UNKNOWN: # %bb.0: ; RV32-BITS-UNKNOWN-NEXT: addi sp, sp, -16 ; 
RV32-BITS-UNKNOWN-NEXT: .cfi_def_cfa_offset 16 -; RV32-BITS-UNKNOWN-NEXT: vsetivli zero, 0, e8, mf8, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetivli zero, 0, e8, mf8, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vmv.x.s a0, v0 ; RV32-BITS-UNKNOWN-NEXT: andi a1, a0, 1 ; RV32-BITS-UNKNOWN-NEXT: sb a1, 15(sp) @@ -93,7 +93,7 @@ ; RV32-BITS-UNKNOWN-NEXT: srli a0, a0, 31 ; RV32-BITS-UNKNOWN-NEXT: sb a0, 8(sp) ; RV32-BITS-UNKNOWN-NEXT: addi a0, sp, 8 -; RV32-BITS-UNKNOWN-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vle8.v v8, (a0) ; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v8, 1 ; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 @@ -104,7 +104,7 @@ ; RV32-BITS-256: # %bb.0: ; RV32-BITS-256-NEXT: addi sp, sp, -16 ; RV32-BITS-256-NEXT: .cfi_def_cfa_offset 16 -; RV32-BITS-256-NEXT: vsetivli zero, 0, e8, mf8, ta, mu +; RV32-BITS-256-NEXT: vsetivli zero, 0, e8, mf8, ta, ma ; RV32-BITS-256-NEXT: vmv.x.s a0, v0 ; RV32-BITS-256-NEXT: andi a1, a0, 1 ; RV32-BITS-256-NEXT: sb a1, 15(sp) @@ -130,7 +130,7 @@ ; RV32-BITS-256-NEXT: srli a0, a0, 31 ; RV32-BITS-256-NEXT: sb a0, 8(sp) ; RV32-BITS-256-NEXT: addi a0, sp, 8 -; RV32-BITS-256-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV32-BITS-256-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV32-BITS-256-NEXT: vle8.v v8, (a0) ; RV32-BITS-256-NEXT: vand.vi v8, v8, 1 ; RV32-BITS-256-NEXT: vmsne.vi v0, v8, 0 @@ -141,7 +141,7 @@ ; RV32-BITS-512: # %bb.0: ; RV32-BITS-512-NEXT: addi sp, sp, -16 ; RV32-BITS-512-NEXT: .cfi_def_cfa_offset 16 -; RV32-BITS-512-NEXT: vsetivli zero, 0, e8, mf8, ta, mu +; RV32-BITS-512-NEXT: vsetivli zero, 0, e8, mf8, ta, ma ; RV32-BITS-512-NEXT: vmv.x.s a0, v0 ; RV32-BITS-512-NEXT: andi a1, a0, 1 ; RV32-BITS-512-NEXT: sb a1, 15(sp) @@ -167,7 +167,7 @@ ; RV32-BITS-512-NEXT: srli a0, a0, 31 ; RV32-BITS-512-NEXT: sb a0, 8(sp) ; RV32-BITS-512-NEXT: addi a0, sp, 8 -; RV32-BITS-512-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV32-BITS-512-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV32-BITS-512-NEXT: vle8.v v8, (a0) ; RV32-BITS-512-NEXT: vand.vi v8, v8, 1 ; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0 @@ -178,7 +178,7 @@ ; RV64-BITS-UNKNOWN: # %bb.0: ; RV64-BITS-UNKNOWN-NEXT: addi sp, sp, -16 ; RV64-BITS-UNKNOWN-NEXT: .cfi_def_cfa_offset 16 -; RV64-BITS-UNKNOWN-NEXT: vsetivli zero, 0, e8, mf8, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetivli zero, 0, e8, mf8, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vmv.x.s a0, v0 ; RV64-BITS-UNKNOWN-NEXT: andi a1, a0, 1 ; RV64-BITS-UNKNOWN-NEXT: sb a1, 15(sp) @@ -204,7 +204,7 @@ ; RV64-BITS-UNKNOWN-NEXT: srli a0, a0, 63 ; RV64-BITS-UNKNOWN-NEXT: sb a0, 8(sp) ; RV64-BITS-UNKNOWN-NEXT: addi a0, sp, 8 -; RV64-BITS-UNKNOWN-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vle8.v v8, (a0) ; RV64-BITS-UNKNOWN-NEXT: vand.vi v8, v8, 1 ; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 @@ -215,7 +215,7 @@ ; RV64-BITS-256: # %bb.0: ; RV64-BITS-256-NEXT: addi sp, sp, -16 ; RV64-BITS-256-NEXT: .cfi_def_cfa_offset 16 -; RV64-BITS-256-NEXT: vsetivli zero, 0, e8, mf8, ta, mu +; RV64-BITS-256-NEXT: vsetivli zero, 0, e8, mf8, ta, ma ; RV64-BITS-256-NEXT: vmv.x.s a0, v0 ; RV64-BITS-256-NEXT: andi a1, a0, 1 ; RV64-BITS-256-NEXT: sb a1, 15(sp) @@ -241,7 +241,7 @@ ; RV64-BITS-256-NEXT: srli a0, a0, 63 ; RV64-BITS-256-NEXT: sb a0, 8(sp) ; RV64-BITS-256-NEXT: addi a0, sp, 8 -; RV64-BITS-256-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV64-BITS-256-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64-BITS-256-NEXT: vle8.v v8, (a0) ; 
RV64-BITS-256-NEXT: vand.vi v8, v8, 1 ; RV64-BITS-256-NEXT: vmsne.vi v0, v8, 0 @@ -252,7 +252,7 @@ ; RV64-BITS-512: # %bb.0: ; RV64-BITS-512-NEXT: addi sp, sp, -16 ; RV64-BITS-512-NEXT: .cfi_def_cfa_offset 16 -; RV64-BITS-512-NEXT: vsetivli zero, 0, e8, mf8, ta, mu +; RV64-BITS-512-NEXT: vsetivli zero, 0, e8, mf8, ta, ma ; RV64-BITS-512-NEXT: vmv.x.s a0, v0 ; RV64-BITS-512-NEXT: andi a1, a0, 1 ; RV64-BITS-512-NEXT: sb a1, 15(sp) @@ -278,7 +278,7 @@ ; RV64-BITS-512-NEXT: srli a0, a0, 63 ; RV64-BITS-512-NEXT: sb a0, 8(sp) ; RV64-BITS-512-NEXT: addi a0, sp, 8 -; RV64-BITS-512-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV64-BITS-512-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64-BITS-512-NEXT: vle8.v v8, (a0) ; RV64-BITS-512-NEXT: vand.vi v8, v8, 1 ; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0 @@ -293,7 +293,7 @@ ; RV32-BITS-UNKNOWN: # %bb.0: ; RV32-BITS-UNKNOWN-NEXT: addi sp, sp, -16 ; RV32-BITS-UNKNOWN-NEXT: .cfi_def_cfa_offset 16 -; RV32-BITS-UNKNOWN-NEXT: vsetivli zero, 0, e16, mf4, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetivli zero, 0, e16, mf4, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vmv.x.s a0, v0 ; RV32-BITS-UNKNOWN-NEXT: andi a1, a0, 1 ; RV32-BITS-UNKNOWN-NEXT: sb a1, 15(sp) @@ -343,7 +343,7 @@ ; RV32-BITS-UNKNOWN-NEXT: srli a0, a0, 31 ; RV32-BITS-UNKNOWN-NEXT: sb a0, 0(sp) ; RV32-BITS-UNKNOWN-NEXT: mv a0, sp -; RV32-BITS-UNKNOWN-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vle8.v v8, (a0) ; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v8, 1 ; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 @@ -354,7 +354,7 @@ ; RV32-BITS-256: # %bb.0: ; RV32-BITS-256-NEXT: addi sp, sp, -16 ; RV32-BITS-256-NEXT: .cfi_def_cfa_offset 16 -; RV32-BITS-256-NEXT: vsetivli zero, 0, e16, mf4, ta, mu +; RV32-BITS-256-NEXT: vsetivli zero, 0, e16, mf4, ta, ma ; RV32-BITS-256-NEXT: vmv.x.s a0, v0 ; RV32-BITS-256-NEXT: andi a1, a0, 1 ; RV32-BITS-256-NEXT: sb a1, 15(sp) @@ -404,7 +404,7 @@ ; RV32-BITS-256-NEXT: srli a0, a0, 31 ; RV32-BITS-256-NEXT: sb a0, 0(sp) ; RV32-BITS-256-NEXT: mv a0, sp -; RV32-BITS-256-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; RV32-BITS-256-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV32-BITS-256-NEXT: vle8.v v8, (a0) ; RV32-BITS-256-NEXT: vand.vi v8, v8, 1 ; RV32-BITS-256-NEXT: vmsne.vi v0, v8, 0 @@ -415,7 +415,7 @@ ; RV32-BITS-512: # %bb.0: ; RV32-BITS-512-NEXT: addi sp, sp, -16 ; RV32-BITS-512-NEXT: .cfi_def_cfa_offset 16 -; RV32-BITS-512-NEXT: vsetivli zero, 0, e16, mf4, ta, mu +; RV32-BITS-512-NEXT: vsetivli zero, 0, e16, mf4, ta, ma ; RV32-BITS-512-NEXT: vmv.x.s a0, v0 ; RV32-BITS-512-NEXT: andi a1, a0, 1 ; RV32-BITS-512-NEXT: sb a1, 15(sp) @@ -465,7 +465,7 @@ ; RV32-BITS-512-NEXT: srli a0, a0, 31 ; RV32-BITS-512-NEXT: sb a0, 0(sp) ; RV32-BITS-512-NEXT: mv a0, sp -; RV32-BITS-512-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; RV32-BITS-512-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV32-BITS-512-NEXT: vle8.v v8, (a0) ; RV32-BITS-512-NEXT: vand.vi v8, v8, 1 ; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0 @@ -476,7 +476,7 @@ ; RV64-BITS-UNKNOWN: # %bb.0: ; RV64-BITS-UNKNOWN-NEXT: addi sp, sp, -16 ; RV64-BITS-UNKNOWN-NEXT: .cfi_def_cfa_offset 16 -; RV64-BITS-UNKNOWN-NEXT: vsetivli zero, 0, e16, mf4, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetivli zero, 0, e16, mf4, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vmv.x.s a0, v0 ; RV64-BITS-UNKNOWN-NEXT: andi a1, a0, 1 ; RV64-BITS-UNKNOWN-NEXT: sb a1, 15(sp) @@ -526,7 +526,7 @@ ; RV64-BITS-UNKNOWN-NEXT: srli a0, a0, 63 ; RV64-BITS-UNKNOWN-NEXT: sb a0, 0(sp) ; RV64-BITS-UNKNOWN-NEXT: mv a0, sp -; RV64-BITS-UNKNOWN-NEXT: 
vsetivli zero, 16, e8, m1, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vle8.v v8, (a0) ; RV64-BITS-UNKNOWN-NEXT: vand.vi v8, v8, 1 ; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 @@ -537,7 +537,7 @@ ; RV64-BITS-256: # %bb.0: ; RV64-BITS-256-NEXT: addi sp, sp, -16 ; RV64-BITS-256-NEXT: .cfi_def_cfa_offset 16 -; RV64-BITS-256-NEXT: vsetivli zero, 0, e16, mf4, ta, mu +; RV64-BITS-256-NEXT: vsetivli zero, 0, e16, mf4, ta, ma ; RV64-BITS-256-NEXT: vmv.x.s a0, v0 ; RV64-BITS-256-NEXT: andi a1, a0, 1 ; RV64-BITS-256-NEXT: sb a1, 15(sp) @@ -587,7 +587,7 @@ ; RV64-BITS-256-NEXT: srli a0, a0, 63 ; RV64-BITS-256-NEXT: sb a0, 0(sp) ; RV64-BITS-256-NEXT: mv a0, sp -; RV64-BITS-256-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; RV64-BITS-256-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV64-BITS-256-NEXT: vle8.v v8, (a0) ; RV64-BITS-256-NEXT: vand.vi v8, v8, 1 ; RV64-BITS-256-NEXT: vmsne.vi v0, v8, 0 @@ -598,7 +598,7 @@ ; RV64-BITS-512: # %bb.0: ; RV64-BITS-512-NEXT: addi sp, sp, -16 ; RV64-BITS-512-NEXT: .cfi_def_cfa_offset 16 -; RV64-BITS-512-NEXT: vsetivli zero, 0, e16, mf4, ta, mu +; RV64-BITS-512-NEXT: vsetivli zero, 0, e16, mf4, ta, ma ; RV64-BITS-512-NEXT: vmv.x.s a0, v0 ; RV64-BITS-512-NEXT: andi a1, a0, 1 ; RV64-BITS-512-NEXT: sb a1, 15(sp) @@ -648,7 +648,7 @@ ; RV64-BITS-512-NEXT: srli a0, a0, 63 ; RV64-BITS-512-NEXT: sb a0, 0(sp) ; RV64-BITS-512-NEXT: mv a0, sp -; RV64-BITS-512-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; RV64-BITS-512-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV64-BITS-512-NEXT: vle8.v v8, (a0) ; RV64-BITS-512-NEXT: vand.vi v8, v8, 1 ; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0 @@ -670,7 +670,7 @@ ; RV32-BITS-UNKNOWN-NEXT: addi s0, sp, 64 ; RV32-BITS-UNKNOWN-NEXT: .cfi_def_cfa s0, 0 ; RV32-BITS-UNKNOWN-NEXT: andi sp, sp, -32 -; RV32-BITS-UNKNOWN-NEXT: vsetivli zero, 0, e32, mf2, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetivli zero, 0, e32, mf2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vmv.x.s a0, v0 ; RV32-BITS-UNKNOWN-NEXT: andi a1, a0, 1 ; RV32-BITS-UNKNOWN-NEXT: sb a1, 31(sp) @@ -768,7 +768,7 @@ ; RV32-BITS-UNKNOWN-NEXT: sb a0, 1(sp) ; RV32-BITS-UNKNOWN-NEXT: li a0, 32 ; RV32-BITS-UNKNOWN-NEXT: mv a1, sp -; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vle8.v v8, (a1) ; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v8, 1 ; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 @@ -789,7 +789,7 @@ ; RV32-BITS-256-NEXT: addi s0, sp, 64 ; RV32-BITS-256-NEXT: .cfi_def_cfa s0, 0 ; RV32-BITS-256-NEXT: andi sp, sp, -32 -; RV32-BITS-256-NEXT: vsetivli zero, 0, e32, mf2, ta, mu +; RV32-BITS-256-NEXT: vsetivli zero, 0, e32, mf2, ta, ma ; RV32-BITS-256-NEXT: vmv.x.s a0, v0 ; RV32-BITS-256-NEXT: andi a1, a0, 1 ; RV32-BITS-256-NEXT: sb a1, 31(sp) @@ -887,7 +887,7 @@ ; RV32-BITS-256-NEXT: sb a0, 1(sp) ; RV32-BITS-256-NEXT: li a0, 32 ; RV32-BITS-256-NEXT: mv a1, sp -; RV32-BITS-256-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; RV32-BITS-256-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; RV32-BITS-256-NEXT: vle8.v v8, (a1) ; RV32-BITS-256-NEXT: vand.vi v8, v8, 1 ; RV32-BITS-256-NEXT: vmsne.vi v0, v8, 0 @@ -908,7 +908,7 @@ ; RV32-BITS-512-NEXT: addi s0, sp, 64 ; RV32-BITS-512-NEXT: .cfi_def_cfa s0, 0 ; RV32-BITS-512-NEXT: andi sp, sp, -32 -; RV32-BITS-512-NEXT: vsetivli zero, 0, e32, mf2, ta, mu +; RV32-BITS-512-NEXT: vsetivli zero, 0, e32, mf2, ta, ma ; RV32-BITS-512-NEXT: vmv.x.s a0, v0 ; RV32-BITS-512-NEXT: andi a1, a0, 1 ; RV32-BITS-512-NEXT: sb a1, 31(sp) @@ -1006,7 +1006,7 @@ ; RV32-BITS-512-NEXT: 
sb a0, 1(sp) ; RV32-BITS-512-NEXT: li a0, 32 ; RV32-BITS-512-NEXT: mv a1, sp -; RV32-BITS-512-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; RV32-BITS-512-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; RV32-BITS-512-NEXT: vle8.v v8, (a1) ; RV32-BITS-512-NEXT: vand.vi v8, v8, 1 ; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0 @@ -1027,7 +1027,7 @@ ; RV64-BITS-UNKNOWN-NEXT: addi s0, sp, 64 ; RV64-BITS-UNKNOWN-NEXT: .cfi_def_cfa s0, 0 ; RV64-BITS-UNKNOWN-NEXT: andi sp, sp, -32 -; RV64-BITS-UNKNOWN-NEXT: vsetivli zero, 0, e32, mf2, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetivli zero, 0, e32, mf2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vmv.x.s a0, v0 ; RV64-BITS-UNKNOWN-NEXT: andi a1, a0, 1 ; RV64-BITS-UNKNOWN-NEXT: sb a1, 31(sp) @@ -1125,7 +1125,7 @@ ; RV64-BITS-UNKNOWN-NEXT: sb a0, 1(sp) ; RV64-BITS-UNKNOWN-NEXT: li a0, 32 ; RV64-BITS-UNKNOWN-NEXT: mv a1, sp -; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vle8.v v8, (a1) ; RV64-BITS-UNKNOWN-NEXT: vand.vi v8, v8, 1 ; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 @@ -1146,7 +1146,7 @@ ; RV64-BITS-256-NEXT: addi s0, sp, 64 ; RV64-BITS-256-NEXT: .cfi_def_cfa s0, 0 ; RV64-BITS-256-NEXT: andi sp, sp, -32 -; RV64-BITS-256-NEXT: vsetivli zero, 0, e32, mf2, ta, mu +; RV64-BITS-256-NEXT: vsetivli zero, 0, e32, mf2, ta, ma ; RV64-BITS-256-NEXT: vmv.x.s a0, v0 ; RV64-BITS-256-NEXT: andi a1, a0, 1 ; RV64-BITS-256-NEXT: sb a1, 31(sp) @@ -1244,7 +1244,7 @@ ; RV64-BITS-256-NEXT: sb a0, 1(sp) ; RV64-BITS-256-NEXT: li a0, 32 ; RV64-BITS-256-NEXT: mv a1, sp -; RV64-BITS-256-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; RV64-BITS-256-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; RV64-BITS-256-NEXT: vle8.v v8, (a1) ; RV64-BITS-256-NEXT: vand.vi v8, v8, 1 ; RV64-BITS-256-NEXT: vmsne.vi v0, v8, 0 @@ -1265,7 +1265,7 @@ ; RV64-BITS-512-NEXT: addi s0, sp, 64 ; RV64-BITS-512-NEXT: .cfi_def_cfa s0, 0 ; RV64-BITS-512-NEXT: andi sp, sp, -32 -; RV64-BITS-512-NEXT: vsetivli zero, 0, e32, mf2, ta, mu +; RV64-BITS-512-NEXT: vsetivli zero, 0, e32, mf2, ta, ma ; RV64-BITS-512-NEXT: vmv.x.s a0, v0 ; RV64-BITS-512-NEXT: andi a1, a0, 1 ; RV64-BITS-512-NEXT: sb a1, 31(sp) @@ -1363,7 +1363,7 @@ ; RV64-BITS-512-NEXT: sb a0, 1(sp) ; RV64-BITS-512-NEXT: li a0, 32 ; RV64-BITS-512-NEXT: mv a1, sp -; RV64-BITS-512-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; RV64-BITS-512-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; RV64-BITS-512-NEXT: vle8.v v8, (a1) ; RV64-BITS-512-NEXT: vand.vi v8, v8, 1 ; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0 @@ -1388,7 +1388,7 @@ ; RV32-BITS-UNKNOWN-NEXT: addi s0, sp, 128 ; RV32-BITS-UNKNOWN-NEXT: .cfi_def_cfa s0, 0 ; RV32-BITS-UNKNOWN-NEXT: andi sp, sp, -64 -; RV32-BITS-UNKNOWN-NEXT: vsetivli zero, 0, e32, mf2, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetivli zero, 0, e32, mf2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vmv.x.s a0, v0 ; RV32-BITS-UNKNOWN-NEXT: andi a1, a0, 1 ; RV32-BITS-UNKNOWN-NEXT: sb a1, 63(sp) @@ -1484,7 +1484,7 @@ ; RV32-BITS-UNKNOWN-NEXT: slli a0, a0, 1 ; RV32-BITS-UNKNOWN-NEXT: srli a0, a0, 31 ; RV32-BITS-UNKNOWN-NEXT: sb a0, 33(sp) -; RV32-BITS-UNKNOWN-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vslidedown.vi v8, v0, 1 ; RV32-BITS-UNKNOWN-NEXT: vmv.x.s a0, v8 ; RV32-BITS-UNKNOWN-NEXT: andi a1, a0, 1 @@ -1583,7 +1583,7 @@ ; RV32-BITS-UNKNOWN-NEXT: sb a0, 1(sp) ; RV32-BITS-UNKNOWN-NEXT: li a0, 64 ; RV32-BITS-UNKNOWN-NEXT: mv a1, sp -; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli 
zero, a0, e8, m4, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vle8.v v8, (a1) ; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v8, 1 ; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 @@ -1604,7 +1604,7 @@ ; RV32-BITS-256-NEXT: addi s0, sp, 128 ; RV32-BITS-256-NEXT: .cfi_def_cfa s0, 0 ; RV32-BITS-256-NEXT: andi sp, sp, -64 -; RV32-BITS-256-NEXT: vsetivli zero, 0, e32, mf2, ta, mu +; RV32-BITS-256-NEXT: vsetivli zero, 0, e32, mf2, ta, ma ; RV32-BITS-256-NEXT: vmv.x.s a0, v0 ; RV32-BITS-256-NEXT: andi a1, a0, 1 ; RV32-BITS-256-NEXT: sb a1, 63(sp) @@ -1700,7 +1700,7 @@ ; RV32-BITS-256-NEXT: slli a0, a0, 1 ; RV32-BITS-256-NEXT: srli a0, a0, 31 ; RV32-BITS-256-NEXT: sb a0, 33(sp) -; RV32-BITS-256-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV32-BITS-256-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV32-BITS-256-NEXT: vslidedown.vi v8, v0, 1 ; RV32-BITS-256-NEXT: vmv.x.s a0, v8 ; RV32-BITS-256-NEXT: andi a1, a0, 1 @@ -1799,7 +1799,7 @@ ; RV32-BITS-256-NEXT: sb a0, 1(sp) ; RV32-BITS-256-NEXT: li a0, 64 ; RV32-BITS-256-NEXT: mv a1, sp -; RV32-BITS-256-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; RV32-BITS-256-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; RV32-BITS-256-NEXT: vle8.v v8, (a1) ; RV32-BITS-256-NEXT: vand.vi v8, v8, 1 ; RV32-BITS-256-NEXT: vmsne.vi v0, v8, 0 @@ -1820,7 +1820,7 @@ ; RV32-BITS-512-NEXT: addi s0, sp, 128 ; RV32-BITS-512-NEXT: .cfi_def_cfa s0, 0 ; RV32-BITS-512-NEXT: andi sp, sp, -64 -; RV32-BITS-512-NEXT: vsetivli zero, 0, e32, mf2, ta, mu +; RV32-BITS-512-NEXT: vsetivli zero, 0, e32, mf2, ta, ma ; RV32-BITS-512-NEXT: vmv.x.s a0, v0 ; RV32-BITS-512-NEXT: andi a1, a0, 1 ; RV32-BITS-512-NEXT: sb a1, 63(sp) @@ -1916,7 +1916,7 @@ ; RV32-BITS-512-NEXT: slli a0, a0, 1 ; RV32-BITS-512-NEXT: srli a0, a0, 31 ; RV32-BITS-512-NEXT: sb a0, 33(sp) -; RV32-BITS-512-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV32-BITS-512-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV32-BITS-512-NEXT: vslidedown.vi v8, v0, 1 ; RV32-BITS-512-NEXT: vmv.x.s a0, v8 ; RV32-BITS-512-NEXT: andi a1, a0, 1 @@ -2015,7 +2015,7 @@ ; RV32-BITS-512-NEXT: sb a0, 1(sp) ; RV32-BITS-512-NEXT: li a0, 64 ; RV32-BITS-512-NEXT: mv a1, sp -; RV32-BITS-512-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; RV32-BITS-512-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; RV32-BITS-512-NEXT: vle8.v v8, (a1) ; RV32-BITS-512-NEXT: vand.vi v8, v8, 1 ; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0 @@ -2036,7 +2036,7 @@ ; RV64-BITS-UNKNOWN-NEXT: addi s0, sp, 128 ; RV64-BITS-UNKNOWN-NEXT: .cfi_def_cfa s0, 0 ; RV64-BITS-UNKNOWN-NEXT: andi sp, sp, -64 -; RV64-BITS-UNKNOWN-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vmv.x.s a0, v0 ; RV64-BITS-UNKNOWN-NEXT: andi a1, a0, 1 ; RV64-BITS-UNKNOWN-NEXT: sb a1, 63(sp) @@ -2229,7 +2229,7 @@ ; RV64-BITS-UNKNOWN-NEXT: sb a0, 1(sp) ; RV64-BITS-UNKNOWN-NEXT: li a0, 64 ; RV64-BITS-UNKNOWN-NEXT: mv a1, sp -; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vle8.v v8, (a1) ; RV64-BITS-UNKNOWN-NEXT: vand.vi v8, v8, 1 ; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 @@ -2250,7 +2250,7 @@ ; RV64-BITS-256-NEXT: addi s0, sp, 128 ; RV64-BITS-256-NEXT: .cfi_def_cfa s0, 0 ; RV64-BITS-256-NEXT: andi sp, sp, -64 -; RV64-BITS-256-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV64-BITS-256-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV64-BITS-256-NEXT: vmv.x.s a0, v0 ; RV64-BITS-256-NEXT: andi a1, a0, 1 ; RV64-BITS-256-NEXT: sb a1, 63(sp) @@ -2443,7 +2443,7 @@ ; RV64-BITS-256-NEXT: sb a0, 1(sp) ; 
RV64-BITS-256-NEXT: li a0, 64 ; RV64-BITS-256-NEXT: mv a1, sp -; RV64-BITS-256-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; RV64-BITS-256-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; RV64-BITS-256-NEXT: vle8.v v8, (a1) ; RV64-BITS-256-NEXT: vand.vi v8, v8, 1 ; RV64-BITS-256-NEXT: vmsne.vi v0, v8, 0 @@ -2464,7 +2464,7 @@ ; RV64-BITS-512-NEXT: addi s0, sp, 128 ; RV64-BITS-512-NEXT: .cfi_def_cfa s0, 0 ; RV64-BITS-512-NEXT: andi sp, sp, -64 -; RV64-BITS-512-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV64-BITS-512-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV64-BITS-512-NEXT: vmv.x.s a0, v0 ; RV64-BITS-512-NEXT: andi a1, a0, 1 ; RV64-BITS-512-NEXT: sb a1, 63(sp) @@ -2657,7 +2657,7 @@ ; RV64-BITS-512-NEXT: sb a0, 1(sp) ; RV64-BITS-512-NEXT: li a0, 64 ; RV64-BITS-512-NEXT: mv a1, sp -; RV64-BITS-512-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; RV64-BITS-512-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; RV64-BITS-512-NEXT: vle8.v v8, (a1) ; RV64-BITS-512-NEXT: vand.vi v8, v8, 1 ; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0 @@ -2682,9 +2682,9 @@ define <2 x i8> @reverse_v2i8(<2 x i8> %a) { ; CHECK-LABEL: reverse_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vslidedown.vi v9, v8, 1 -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 1 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -2695,7 +2695,7 @@ define <4 x i8> @reverse_v4i8(<4 x i8> %a) { ; CHECK-LABEL: reverse_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: vrsub.vi v10, v9, 3 ; CHECK-NEXT: vrgather.vv v9, v8, v10 @@ -2708,7 +2708,7 @@ define <8 x i8> @reverse_v8i8(<8 x i8> %a) { ; CHECK-LABEL: reverse_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: vrsub.vi v10, v9, 7 ; CHECK-NEXT: vrgather.vv v9, v8, v10 @@ -2721,7 +2721,7 @@ define <16 x i8> @reverse_v16i8(<16 x i8> %a) { ; CHECK-LABEL: reverse_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: vrsub.vi v10, v9, 15 ; CHECK-NEXT: vrgather.vv v9, v8, v10 @@ -2737,7 +2737,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI11_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI11_0) ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vle8.v v12, (a0) ; CHECK-NEXT: vrgather.vv v10, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v10 @@ -2752,7 +2752,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI12_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI12_0) ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vrgather.vv v12, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v12 @@ -2772,9 +2772,9 @@ define <2 x i16> @reverse_v2i16(<2 x i16> %a) { ; CHECK-LABEL: reverse_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v9, v8, 1 -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 1 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -2785,7 +2785,7 @@ define <4 x i16> @reverse_v4i16(<4 x i16> %a) { ; CHECK-LABEL: reverse_v4i16: ; 
CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: vrsub.vi v10, v9, 3 ; CHECK-NEXT: vrgather.vv v9, v8, v10 @@ -2798,7 +2798,7 @@ define <8 x i16> @reverse_v8i16(<8 x i16> %a) { ; CHECK-LABEL: reverse_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: vrsub.vi v10, v9, 7 ; CHECK-NEXT: vrgather.vv v9, v8, v10 @@ -2811,7 +2811,7 @@ define <16 x i16> @reverse_v16i16(<16 x i16> %a) { ; CHECK-LABEL: reverse_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vid.v v10 ; CHECK-NEXT: vrsub.vi v12, v10, 15 ; CHECK-NEXT: vrgather.vv v10, v8, v12 @@ -2827,7 +2827,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI18_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI18_0) ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vrgather.vv v12, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v12 @@ -2847,9 +2847,9 @@ define <2 x i32> @reverse_v2i32(<2 x i32> %a) { ; CHECK-LABEL: reverse_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v9, v8, 1 -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 1 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -2860,7 +2860,7 @@ define <4 x i32> @reverse_v4i32(<4 x i32> %a) { ; CHECK-LABEL: reverse_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: vrsub.vi v10, v9, 3 ; CHECK-NEXT: vrgather.vv v9, v8, v10 @@ -2873,7 +2873,7 @@ define <8 x i32> @reverse_v8i32(<8 x i32> %a) { ; CHECK-LABEL: reverse_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vid.v v10 ; CHECK-NEXT: vrsub.vi v12, v10, 7 ; CHECK-NEXT: vrgather.vv v10, v8, v12 @@ -2886,7 +2886,7 @@ define <16 x i32> @reverse_v16i32(<16 x i32> %a) { ; CHECK-LABEL: reverse_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vid.v v12 ; CHECK-NEXT: vrsub.vi v16, v12, 15 ; CHECK-NEXT: vrgather.vv v12, v8, v16 @@ -2907,9 +2907,9 @@ define <2 x i64> @reverse_v2i64(<2 x i64> %a) { ; CHECK-LABEL: reverse_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v9, v8, 1 -; CHECK-NEXT: vsetivli zero, 2, e64, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 1 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -2920,37 +2920,37 @@ define <4 x i64> @reverse_v4i64(<4 x i64> %a) { ; RV32-BITS-UNKNOWN-LABEL: reverse_v4i64: ; RV32-BITS-UNKNOWN: # %bb.0: -; RV32-BITS-UNKNOWN-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vid.v v10 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vi v12, v10, 3 -; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v8, v12 ; RV32-BITS-UNKNOWN-NEXT: vmv.v.v 
v8, v10 ; RV32-BITS-UNKNOWN-NEXT: ret ; ; RV32-BITS-256-LABEL: reverse_v4i64: ; RV32-BITS-256: # %bb.0: -; RV32-BITS-256-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV32-BITS-256-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV32-BITS-256-NEXT: vid.v v10 ; RV32-BITS-256-NEXT: vrsub.vi v12, v10, 3 -; RV32-BITS-256-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32-BITS-256-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; RV32-BITS-256-NEXT: vrgatherei16.vv v10, v8, v12 ; RV32-BITS-256-NEXT: vmv.v.v v8, v10 ; RV32-BITS-256-NEXT: ret ; ; RV32-BITS-512-LABEL: reverse_v4i64: ; RV32-BITS-512: # %bb.0: -; RV32-BITS-512-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV32-BITS-512-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV32-BITS-512-NEXT: vid.v v10 ; RV32-BITS-512-NEXT: vrsub.vi v12, v10, 3 -; RV32-BITS-512-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32-BITS-512-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; RV32-BITS-512-NEXT: vrgatherei16.vv v10, v8, v12 ; RV32-BITS-512-NEXT: vmv.v.v v8, v10 ; RV32-BITS-512-NEXT: ret ; ; RV64-BITS-UNKNOWN-LABEL: reverse_v4i64: ; RV64-BITS-UNKNOWN: # %bb.0: -; RV64-BITS-UNKNOWN-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v10 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vi v12, v10, 3 ; RV64-BITS-UNKNOWN-NEXT: vrgather.vv v10, v8, v12 @@ -2959,7 +2959,7 @@ ; ; RV64-BITS-256-LABEL: reverse_v4i64: ; RV64-BITS-256: # %bb.0: -; RV64-BITS-256-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-BITS-256-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-BITS-256-NEXT: vid.v v10 ; RV64-BITS-256-NEXT: vrsub.vi v12, v10, 3 ; RV64-BITS-256-NEXT: vrgather.vv v10, v8, v12 @@ -2968,7 +2968,7 @@ ; ; RV64-BITS-512-LABEL: reverse_v4i64: ; RV64-BITS-512: # %bb.0: -; RV64-BITS-512-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-BITS-512-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-BITS-512-NEXT: vid.v v10 ; RV64-BITS-512-NEXT: vrsub.vi v12, v10, 3 ; RV64-BITS-512-NEXT: vrgather.vv v10, v8, v12 @@ -2981,37 +2981,37 @@ define <8 x i64> @reverse_v8i64(<8 x i64> %a) { ; RV32-BITS-UNKNOWN-LABEL: reverse_v8i64: ; RV32-BITS-UNKNOWN: # %bb.0: -; RV32-BITS-UNKNOWN-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vid.v v12 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vi v16, v12, 7 -; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v8, v16 ; RV32-BITS-UNKNOWN-NEXT: vmv.v.v v8, v12 ; RV32-BITS-UNKNOWN-NEXT: ret ; ; RV32-BITS-256-LABEL: reverse_v8i64: ; RV32-BITS-256: # %bb.0: -; RV32-BITS-256-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV32-BITS-256-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV32-BITS-256-NEXT: vid.v v12 ; RV32-BITS-256-NEXT: vrsub.vi v16, v12, 7 -; RV32-BITS-256-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32-BITS-256-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; RV32-BITS-256-NEXT: vrgatherei16.vv v12, v8, v16 ; RV32-BITS-256-NEXT: vmv.v.v v8, v12 ; RV32-BITS-256-NEXT: ret ; ; RV32-BITS-512-LABEL: reverse_v8i64: ; RV32-BITS-512: # %bb.0: -; RV32-BITS-512-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV32-BITS-512-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV32-BITS-512-NEXT: vid.v v12 ; RV32-BITS-512-NEXT: vrsub.vi v16, v12, 7 -; RV32-BITS-512-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32-BITS-512-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; RV32-BITS-512-NEXT: vrgatherei16.vv v12, v8, v16 ; RV32-BITS-512-NEXT: vmv.v.v v8, v12 ; 
RV32-BITS-512-NEXT: ret ; ; RV64-BITS-UNKNOWN-LABEL: reverse_v8i64: ; RV64-BITS-UNKNOWN: # %bb.0: -; RV64-BITS-UNKNOWN-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v12 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vi v16, v12, 7 ; RV64-BITS-UNKNOWN-NEXT: vrgather.vv v12, v8, v16 @@ -3020,7 +3020,7 @@ ; ; RV64-BITS-256-LABEL: reverse_v8i64: ; RV64-BITS-256: # %bb.0: -; RV64-BITS-256-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-BITS-256-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-BITS-256-NEXT: vid.v v12 ; RV64-BITS-256-NEXT: vrsub.vi v16, v12, 7 ; RV64-BITS-256-NEXT: vrgather.vv v12, v8, v16 @@ -3029,7 +3029,7 @@ ; ; RV64-BITS-512-LABEL: reverse_v8i64: ; RV64-BITS-512: # %bb.0: -; RV64-BITS-512-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-BITS-512-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-BITS-512-NEXT: vid.v v12 ; RV64-BITS-512-NEXT: vrsub.vi v16, v12, 7 ; RV64-BITS-512-NEXT: vrgather.vv v12, v8, v16 @@ -3051,9 +3051,9 @@ define <2 x half> @reverse_v2f16(<2 x half> %a) { ; CHECK-LABEL: reverse_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v9, v8, 1 -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 1 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -3064,7 +3064,7 @@ define <4 x half> @reverse_v4f16(<4 x half> %a) { ; CHECK-LABEL: reverse_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: vrsub.vi v10, v9, 3 ; CHECK-NEXT: vrgather.vv v9, v8, v10 @@ -3077,7 +3077,7 @@ define <8 x half> @reverse_v8f16(<8 x half> %a) { ; CHECK-LABEL: reverse_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: vrsub.vi v10, v9, 7 ; CHECK-NEXT: vrgather.vv v9, v8, v10 @@ -3090,7 +3090,7 @@ define <16 x half> @reverse_v16f16(<16 x half> %a) { ; CHECK-LABEL: reverse_v16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vid.v v10 ; CHECK-NEXT: vrsub.vi v12, v10, 15 ; CHECK-NEXT: vrgather.vv v10, v8, v12 @@ -3106,7 +3106,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI33_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI33_0) ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vrgather.vv v12, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v12 @@ -3126,9 +3126,9 @@ define <2 x float> @reverse_v2f32(<2 x float> %a) { ; CHECK-LABEL: reverse_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v9, v8, 1 -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 1 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -3139,7 +3139,7 @@ define <4 x float> @reverse_v4f32(<4 x float> %a) { ; CHECK-LABEL: reverse_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: vrsub.vi v10, v9, 3 ; CHECK-NEXT: vrgather.vv v9, v8, v10 @@ -3152,7 +3152,7 @@ define <8 x float> @reverse_v8f32(<8 x float> %a) { ; 
CHECK-LABEL: reverse_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vid.v v10 ; CHECK-NEXT: vrsub.vi v12, v10, 7 ; CHECK-NEXT: vrgather.vv v10, v8, v12 @@ -3165,7 +3165,7 @@ define <16 x float> @reverse_v16f32(<16 x float> %a) { ; CHECK-LABEL: reverse_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vid.v v12 ; CHECK-NEXT: vrsub.vi v16, v12, 15 ; CHECK-NEXT: vrgather.vv v12, v8, v16 @@ -3186,9 +3186,9 @@ define <2 x double> @reverse_v2f64(<2 x double> %a) { ; CHECK-LABEL: reverse_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v9, v8, 1 -; CHECK-NEXT: vsetivli zero, 2, e64, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 1 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -3199,37 +3199,37 @@ define <4 x double> @reverse_v4f64(<4 x double> %a) { ; RV32-BITS-UNKNOWN-LABEL: reverse_v4f64: ; RV32-BITS-UNKNOWN: # %bb.0: -; RV32-BITS-UNKNOWN-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vid.v v10 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vi v12, v10, 3 -; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v8, v12 ; RV32-BITS-UNKNOWN-NEXT: vmv.v.v v8, v10 ; RV32-BITS-UNKNOWN-NEXT: ret ; ; RV32-BITS-256-LABEL: reverse_v4f64: ; RV32-BITS-256: # %bb.0: -; RV32-BITS-256-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV32-BITS-256-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV32-BITS-256-NEXT: vid.v v10 ; RV32-BITS-256-NEXT: vrsub.vi v12, v10, 3 -; RV32-BITS-256-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32-BITS-256-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; RV32-BITS-256-NEXT: vrgatherei16.vv v10, v8, v12 ; RV32-BITS-256-NEXT: vmv.v.v v8, v10 ; RV32-BITS-256-NEXT: ret ; ; RV32-BITS-512-LABEL: reverse_v4f64: ; RV32-BITS-512: # %bb.0: -; RV32-BITS-512-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV32-BITS-512-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV32-BITS-512-NEXT: vid.v v10 ; RV32-BITS-512-NEXT: vrsub.vi v12, v10, 3 -; RV32-BITS-512-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32-BITS-512-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; RV32-BITS-512-NEXT: vrgatherei16.vv v10, v8, v12 ; RV32-BITS-512-NEXT: vmv.v.v v8, v10 ; RV32-BITS-512-NEXT: ret ; ; RV64-BITS-UNKNOWN-LABEL: reverse_v4f64: ; RV64-BITS-UNKNOWN: # %bb.0: -; RV64-BITS-UNKNOWN-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v10 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vi v12, v10, 3 ; RV64-BITS-UNKNOWN-NEXT: vrgather.vv v10, v8, v12 @@ -3238,7 +3238,7 @@ ; ; RV64-BITS-256-LABEL: reverse_v4f64: ; RV64-BITS-256: # %bb.0: -; RV64-BITS-256-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-BITS-256-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-BITS-256-NEXT: vid.v v10 ; RV64-BITS-256-NEXT: vrsub.vi v12, v10, 3 ; RV64-BITS-256-NEXT: vrgather.vv v10, v8, v12 @@ -3247,7 +3247,7 @@ ; ; RV64-BITS-512-LABEL: reverse_v4f64: ; RV64-BITS-512: # %bb.0: -; RV64-BITS-512-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-BITS-512-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-BITS-512-NEXT: vid.v v10 ; RV64-BITS-512-NEXT: vrsub.vi v12, v10, 3 ; RV64-BITS-512-NEXT: 
vrgather.vv v10, v8, v12 @@ -3260,37 +3260,37 @@ define <8 x double> @reverse_v8f64(<8 x double> %a) { ; RV32-BITS-UNKNOWN-LABEL: reverse_v8f64: ; RV32-BITS-UNKNOWN: # %bb.0: -; RV32-BITS-UNKNOWN-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vid.v v12 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vi v16, v12, 7 -; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v8, v16 ; RV32-BITS-UNKNOWN-NEXT: vmv.v.v v8, v12 ; RV32-BITS-UNKNOWN-NEXT: ret ; ; RV32-BITS-256-LABEL: reverse_v8f64: ; RV32-BITS-256: # %bb.0: -; RV32-BITS-256-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV32-BITS-256-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV32-BITS-256-NEXT: vid.v v12 ; RV32-BITS-256-NEXT: vrsub.vi v16, v12, 7 -; RV32-BITS-256-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32-BITS-256-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; RV32-BITS-256-NEXT: vrgatherei16.vv v12, v8, v16 ; RV32-BITS-256-NEXT: vmv.v.v v8, v12 ; RV32-BITS-256-NEXT: ret ; ; RV32-BITS-512-LABEL: reverse_v8f64: ; RV32-BITS-512: # %bb.0: -; RV32-BITS-512-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV32-BITS-512-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV32-BITS-512-NEXT: vid.v v12 ; RV32-BITS-512-NEXT: vrsub.vi v16, v12, 7 -; RV32-BITS-512-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32-BITS-512-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; RV32-BITS-512-NEXT: vrgatherei16.vv v12, v8, v16 ; RV32-BITS-512-NEXT: vmv.v.v v8, v12 ; RV32-BITS-512-NEXT: ret ; ; RV64-BITS-UNKNOWN-LABEL: reverse_v8f64: ; RV64-BITS-UNKNOWN: # %bb.0: -; RV64-BITS-UNKNOWN-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v12 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vi v16, v12, 7 ; RV64-BITS-UNKNOWN-NEXT: vrgather.vv v12, v8, v16 @@ -3299,7 +3299,7 @@ ; ; RV64-BITS-256-LABEL: reverse_v8f64: ; RV64-BITS-256: # %bb.0: -; RV64-BITS-256-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-BITS-256-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-BITS-256-NEXT: vid.v v12 ; RV64-BITS-256-NEXT: vrsub.vi v16, v12, 7 ; RV64-BITS-256-NEXT: vrgather.vv v12, v8, v16 @@ -3308,7 +3308,7 @@ ; ; RV64-BITS-512-LABEL: reverse_v8f64: ; RV64-BITS-512: # %bb.0: -; RV64-BITS-512-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-BITS-512-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-BITS-512-NEXT: vid.v v12 ; RV64-BITS-512-NEXT: vrsub.vi v16, v12, 7 ; RV64-BITS-512-NEXT: vrgather.vv v12, v8, v16 @@ -3324,7 +3324,7 @@ ; RV32-BITS-UNKNOWN: # %bb.0: ; RV32-BITS-UNKNOWN-NEXT: lui a0, %hi(.LCPI43_0) ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, %lo(.LCPI43_0) -; RV32-BITS-UNKNOWN-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vle32.v v12, (a0) ; RV32-BITS-UNKNOWN-NEXT: vrgather.vv v10, v8, v12 ; RV32-BITS-UNKNOWN-NEXT: vmv.v.v v8, v10 @@ -3334,7 +3334,7 @@ ; RV32-BITS-256: # %bb.0: ; RV32-BITS-256-NEXT: lui a0, %hi(.LCPI43_0) ; RV32-BITS-256-NEXT: addi a0, a0, %lo(.LCPI43_0) -; RV32-BITS-256-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-BITS-256-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-BITS-256-NEXT: vle32.v v12, (a0) ; RV32-BITS-256-NEXT: vrgather.vv v10, v8, v12 ; RV32-BITS-256-NEXT: vmv.v.v v8, v10 @@ -3344,7 +3344,7 @@ ; RV32-BITS-512: # %bb.0: ; RV32-BITS-512-NEXT: lui a0, %hi(.LCPI43_0) ; RV32-BITS-512-NEXT: addi a0, a0, %lo(.LCPI43_0) -; RV32-BITS-512-NEXT: 
vsetivli zero, 8, e32, m2, ta, mu +; RV32-BITS-512-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-BITS-512-NEXT: vle32.v v12, (a0) ; RV32-BITS-512-NEXT: vrgather.vv v10, v8, v12 ; RV32-BITS-512-NEXT: vmv.v.v v8, v10 @@ -3352,7 +3352,7 @@ ; ; RV64-BITS-UNKNOWN-LABEL: reverse_v3i64: ; RV64-BITS-UNKNOWN: # %bb.0: -; RV64-BITS-UNKNOWN-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v10 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vi v12, v10, 2 ; RV64-BITS-UNKNOWN-NEXT: vrgather.vv v10, v8, v12 @@ -3361,7 +3361,7 @@ ; ; RV64-BITS-256-LABEL: reverse_v3i64: ; RV64-BITS-256: # %bb.0: -; RV64-BITS-256-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-BITS-256-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-BITS-256-NEXT: vid.v v10 ; RV64-BITS-256-NEXT: vrsub.vi v12, v10, 2 ; RV64-BITS-256-NEXT: vrgather.vv v10, v8, v12 @@ -3370,7 +3370,7 @@ ; ; RV64-BITS-512-LABEL: reverse_v3i64: ; RV64-BITS-512: # %bb.0: -; RV64-BITS-512-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-BITS-512-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-BITS-512-NEXT: vid.v v10 ; RV64-BITS-512-NEXT: vrsub.vi v12, v10, 2 ; RV64-BITS-512-NEXT: vrgather.vv v10, v8, v12 @@ -3385,7 +3385,7 @@ ; RV32-BITS-UNKNOWN: # %bb.0: ; RV32-BITS-UNKNOWN-NEXT: lui a0, %hi(.LCPI44_0) ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, %lo(.LCPI44_0) -; RV32-BITS-UNKNOWN-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vle32.v v16, (a0) ; RV32-BITS-UNKNOWN-NEXT: vrgather.vv v12, v8, v16 ; RV32-BITS-UNKNOWN-NEXT: vmv.v.v v8, v12 @@ -3395,7 +3395,7 @@ ; RV32-BITS-256: # %bb.0: ; RV32-BITS-256-NEXT: lui a0, %hi(.LCPI44_0) ; RV32-BITS-256-NEXT: addi a0, a0, %lo(.LCPI44_0) -; RV32-BITS-256-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; RV32-BITS-256-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; RV32-BITS-256-NEXT: vle32.v v16, (a0) ; RV32-BITS-256-NEXT: vrgather.vv v12, v8, v16 ; RV32-BITS-256-NEXT: vmv.v.v v8, v12 @@ -3405,7 +3405,7 @@ ; RV32-BITS-512: # %bb.0: ; RV32-BITS-512-NEXT: lui a0, %hi(.LCPI44_0) ; RV32-BITS-512-NEXT: addi a0, a0, %lo(.LCPI44_0) -; RV32-BITS-512-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; RV32-BITS-512-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; RV32-BITS-512-NEXT: vle32.v v16, (a0) ; RV32-BITS-512-NEXT: vrgather.vv v12, v8, v16 ; RV32-BITS-512-NEXT: vmv.v.v v8, v12 @@ -3413,7 +3413,7 @@ ; ; RV64-BITS-UNKNOWN-LABEL: reverse_v6i64: ; RV64-BITS-UNKNOWN: # %bb.0: -; RV64-BITS-UNKNOWN-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v12 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vi v16, v12, 5 ; RV64-BITS-UNKNOWN-NEXT: vrgather.vv v12, v8, v16 @@ -3422,7 +3422,7 @@ ; ; RV64-BITS-256-LABEL: reverse_v6i64: ; RV64-BITS-256: # %bb.0: -; RV64-BITS-256-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-BITS-256-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-BITS-256-NEXT: vid.v v12 ; RV64-BITS-256-NEXT: vrsub.vi v16, v12, 5 ; RV64-BITS-256-NEXT: vrgather.vv v12, v8, v16 @@ -3431,7 +3431,7 @@ ; ; RV64-BITS-512-LABEL: reverse_v6i64: ; RV64-BITS-512: # %bb.0: -; RV64-BITS-512-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-BITS-512-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-BITS-512-NEXT: vid.v v12 ; RV64-BITS-512-NEXT: vrsub.vi v16, v12, 5 ; RV64-BITS-512-NEXT: vrgather.vv v12, v8, v16 @@ -3447,7 +3447,7 @@ ; RV32-BITS-UNKNOWN-NEXT: lui a0, %hi(.LCPI45_0) ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, %lo(.LCPI45_0) ; RV32-BITS-UNKNOWN-NEXT: li 
a1, 32 -; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vle32.v v24, (a0) ; RV32-BITS-UNKNOWN-NEXT: vrgather.vv v16, v8, v24 ; RV32-BITS-UNKNOWN-NEXT: vmv.v.v v8, v16 @@ -3458,7 +3458,7 @@ ; RV32-BITS-256-NEXT: lui a0, %hi(.LCPI45_0) ; RV32-BITS-256-NEXT: addi a0, a0, %lo(.LCPI45_0) ; RV32-BITS-256-NEXT: li a1, 32 -; RV32-BITS-256-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; RV32-BITS-256-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; RV32-BITS-256-NEXT: vle32.v v24, (a0) ; RV32-BITS-256-NEXT: vrgather.vv v16, v8, v24 ; RV32-BITS-256-NEXT: vmv.v.v v8, v16 @@ -3469,7 +3469,7 @@ ; RV32-BITS-512-NEXT: lui a0, %hi(.LCPI45_0) ; RV32-BITS-512-NEXT: addi a0, a0, %lo(.LCPI45_0) ; RV32-BITS-512-NEXT: li a1, 32 -; RV32-BITS-512-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; RV32-BITS-512-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; RV32-BITS-512-NEXT: vle32.v v24, (a0) ; RV32-BITS-512-NEXT: vrgather.vv v16, v8, v24 ; RV32-BITS-512-NEXT: vmv.v.v v8, v16 @@ -3477,7 +3477,7 @@ ; ; RV64-BITS-UNKNOWN-LABEL: reverse_v12i64: ; RV64-BITS-UNKNOWN: # %bb.0: -; RV64-BITS-UNKNOWN-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v16 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vi v24, v16, 11 ; RV64-BITS-UNKNOWN-NEXT: vrgather.vv v16, v8, v24 @@ -3486,7 +3486,7 @@ ; ; RV64-BITS-256-LABEL: reverse_v12i64: ; RV64-BITS-256: # %bb.0: -; RV64-BITS-256-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-BITS-256-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-BITS-256-NEXT: vid.v v16 ; RV64-BITS-256-NEXT: vrsub.vi v24, v16, 11 ; RV64-BITS-256-NEXT: vrgather.vv v16, v8, v24 @@ -3495,7 +3495,7 @@ ; ; RV64-BITS-512-LABEL: reverse_v12i64: ; RV64-BITS-512: # %bb.0: -; RV64-BITS-512-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-BITS-512-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-BITS-512-NEXT: vid.v v16 ; RV64-BITS-512-NEXT: vrsub.vi v24, v16, 11 ; RV64-BITS-512-NEXT: vrgather.vv v16, v8, v24 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store-asm.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store-asm.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store-asm.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store-asm.ll @@ -18,7 +18,7 @@ ; CHECK-NEXT: li a5, 1024 ; CHECK-NEXT: .LBB0_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vsetvli zero, a3, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-NEXT: vlse8.v v8, (a1), a4 ; CHECK-NEXT: add a6, a0, a2 ; CHECK-NEXT: vle8.v v9, (a6) @@ -60,7 +60,7 @@ ; V-NEXT: li a2, 0 ; V-NEXT: lui a3, 983765 ; V-NEXT: addiw a3, a3, 873 -; V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; V-NEXT: vmv.s.x v0, a3 ; V-NEXT: li a3, 32 ; V-NEXT: li a4, 5 @@ -85,7 +85,7 @@ ; ZVE32F-NEXT: li a2, 0 ; ZVE32F-NEXT: lui a3, 983765 ; ZVE32F-NEXT: addiw a3, a3, 873 -; ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; ZVE32F-NEXT: vmv.s.x v0, a3 ; ZVE32F-NEXT: li a3, 32 ; ZVE32F-NEXT: li a4, 5 @@ -139,7 +139,7 @@ ; CHECK-NEXT: li a5, 1024 ; CHECK-NEXT: .LBB2_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vsetvli zero, a3, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-NEXT: vlse8.v v8, (a1), a4 ; CHECK-NEXT: add a6, a0, a2 ; CHECK-NEXT: vle8.v v9, (a6) @@ -183,7 +183,7 @@ ; 
CHECK-NEXT: li a4, 1024 ; CHECK-NEXT: .LBB3_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vsetvli zero, a3, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-NEXT: vlse8.v v8, (a1), zero ; CHECK-NEXT: add a5, a0, a2 ; CHECK-NEXT: vle8.v v9, (a5) @@ -233,7 +233,7 @@ ; CHECK-NEXT: .LBB4_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: add a6, a1, a2 -; CHECK-NEXT: vsetvli zero, a3, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a6) ; CHECK-NEXT: vlse8.v v9, (a0), a4 ; CHECK-NEXT: vadd.vv v8, v9, v8 @@ -274,7 +274,7 @@ ; V-NEXT: li a3, 32 ; V-NEXT: lui a4, 983765 ; V-NEXT: addiw a4, a4, 873 -; V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; V-NEXT: vmv.s.x v0, a4 ; V-NEXT: li a4, 5 ; V-NEXT: li a5, 1024 @@ -299,7 +299,7 @@ ; ZVE32F-NEXT: li a3, 32 ; ZVE32F-NEXT: lui a4, 983765 ; ZVE32F-NEXT: addiw a4, a4, 873 -; ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; ZVE32F-NEXT: vmv.s.x v0, a4 ; ZVE32F-NEXT: li a4, 5 ; ZVE32F-NEXT: li a5, 1024 @@ -353,13 +353,13 @@ ; CHECK-NEXT: li a4, 32 ; CHECK-NEXT: .LBB6_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vsetivli zero, 8, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m1, ta, ma ; CHECK-NEXT: vlse32.v v8, (a1), a3 -; CHECK-NEXT: vsetvli zero, a4, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) -; CHECK-NEXT: vsetivli zero, 8, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m1, ta, ma ; CHECK-NEXT: vadd.vv v8, v9, v8 -; CHECK-NEXT: vsetvli zero, a4, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a4, e8, m1, ta, ma ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: addi a2, a2, -8 ; CHECK-NEXT: addi a0, a0, 32 @@ -404,9 +404,9 @@ ; CHECK-NEXT: li a4, 16 ; CHECK-NEXT: .LBB7_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vsetvli zero, a3, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a1) -; CHECK-NEXT: vsetivli zero, 8, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m1, ta, ma ; CHECK-NEXT: vlse32.v v9, (a0), a4 ; CHECK-NEXT: vadd.vv v8, v9, v8 ; CHECK-NEXT: vsse32.v v8, (a0), a4 @@ -454,7 +454,7 @@ ; CHECK-NEXT: addi a1, a1, 132 ; CHECK-NEXT: li a2, 1024 ; CHECK-NEXT: li a3, 16 -; CHECK-NEXT: vsetivli zero, 8, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m1, ta, ma ; CHECK-NEXT: .LBB8_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: addi a4, a1, -128 @@ -520,7 +520,7 @@ ; CHECK-NEXT: li a2, 256 ; CHECK-NEXT: li a3, 64 ; CHECK-NEXT: li a4, 16 -; CHECK-NEXT: vsetivli zero, 8, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m1, ta, ma ; CHECK-NEXT: .LBB9_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vlse32.v v8, (a1), a3 @@ -609,7 +609,7 @@ ; V: # %bb.0: ; V-NEXT: li a2, 1024 ; V-NEXT: li a3, 40 -; V-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; V-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; V-NEXT: .LBB10_1: # =>This Inner Loop Header: Depth=1 ; V-NEXT: vlse64.v v8, (a1), a3 ; V-NEXT: addi a4, a1, 80 @@ -686,7 +686,7 @@ ; V: # %bb.0: ; V-NEXT: li a2, 1024 ; V-NEXT: li a3, 40 -; V-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; V-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; V-NEXT: .LBB11_1: # =>This Inner Loop Header: Depth=1 ; V-NEXT: vle64.v v8, (a1) ; V-NEXT: addi a4, a1, 16 @@ -782,7 +782,7 
@@ ; CHECK-NEXT: li t0, 5 ; CHECK-NEXT: mv t1, a5 ; CHECK-NEXT: .LBB12_3: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vsetvli zero, a7, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a7, e8, m1, ta, ma ; CHECK-NEXT: vlse8.v v8, (a6), t0 ; CHECK-NEXT: vle8.v v9, (a2) ; CHECK-NEXT: vadd.vv v8, v9, v8 @@ -883,7 +883,7 @@ ; CHECK-NEXT: beqz a2, .LBB13_3 ; CHECK-NEXT: # %bb.1: # %bb2 ; CHECK-NEXT: li a3, 5 -; CHECK-NEXT: vsetivli zero, 16, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, mf2, ta, ma ; CHECK-NEXT: .LBB13_2: # %bb4 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vlse8.v v8, (a1), a3 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp-mask.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp-mask.ll @@ -19,7 +19,7 @@ define <2 x i1> @vtrunc_nxv2i1_nxv2i16_unmasked(<2 x i16> %a, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i1_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -44,7 +44,7 @@ define <2 x i1> @vtrunc_nxv2i1_nxv2i32_unmasked(<2 x i32> %a, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i1_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -69,7 +69,7 @@ define <2 x i1> @vtrunc_nxv2i1_nxv2i64_unmasked(<2 x i64> %a, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i1_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp.ll @@ -41,7 +41,7 @@ define <2 x i8> @vtrunc_nxv2i8_nxv2i16_unmasked(<2 x i16> %a, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i8_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i16(<2 x i16> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl) @@ -62,7 +62,7 @@ ; CHECK-NEXT: addi a1, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma ; CHECK-NEXT: addi a2, a0, -64 ; CHECK-NEXT: vslidedown.vi v0, v0, 8 ; CHECK-NEXT: bltu a0, a2, .LBB4_2 @@ -82,7 +82,7 @@ ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vnsrl.wi v16, v24, 0, v0.t ; CHECK-NEXT: li a0, 128 -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma ; CHECK-NEXT: vslideup.vx v16, v8, a1 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: csrr a0, vlenb @@ -111,9 +111,9 @@ define <2 x i8> @vtrunc_nxv2i8_nxv2i32_unmasked(<2 x i32> %a, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i8_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, 
mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i32(<2 x i32> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl) @@ -139,11 +139,11 @@ define <2 x i8> @vtrunc_nxv2i8_nxv2i64_unmasked(<2 x i64> %a, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i8_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i64(<2 x i64> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl) @@ -165,7 +165,7 @@ define <2 x i16> @vtrunc_nxv2i16_nxv2i32_unmasked(<2 x i32> %a, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i16_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.vp.trunc.nxv2i16.nxv2i32(<2 x i32> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl) @@ -189,9 +189,9 @@ define <2 x i16> @vtrunc_nxv2i16_nxv2i64_unmasked(<2 x i64> %a, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i16_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.vp.trunc.nxv2i16.nxv2i64(<2 x i64> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl) @@ -227,7 +227,7 @@ define <2 x i32> @vtrunc_nxv2i32_nxv2i64_unmasked(<2 x i64> %a, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i32_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %v = call <2 x i32> @llvm.vp.trunc.nxv2i64.nxv2i32(<2 x i64> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl) @@ -258,7 +258,7 @@ ; CHECK-NEXT: addi a2, a2, 16 ; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill ; CHECK-NEXT: li a2, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma ; CHECK-NEXT: addi a3, a7, -64 ; CHECK-NEXT: vslidedown.vi v2, v0, 8 ; CHECK-NEXT: mv a4, a2 @@ -266,7 +266,7 @@ ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a4, a3 ; CHECK-NEXT: .LBB16_2: -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v3, v2, 4 ; CHECK-NEXT: addi a6, a4, -32 ; CHECK-NEXT: addi a3, a1, 640 @@ -275,9 +275,9 @@ ; 
CHECK-NEXT: # %bb.3: ; CHECK-NEXT: mv a5, a6 ; CHECK-NEXT: .LBB16_4: -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v0, v3, 2 -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v16, (a3) ; CHECK-NEXT: addi t0, a5, -16 ; CHECK-NEXT: addi a6, a1, 512 @@ -313,7 +313,7 @@ ; CHECK-NEXT: # %bb.9: ; CHECK-NEXT: li a7, 64 ; CHECK-NEXT: .LBB16_10: -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v3, v1, 4 ; CHECK-NEXT: addi t0, a7, -32 ; CHECK-NEXT: addi a5, a1, 128 @@ -322,9 +322,9 @@ ; CHECK-NEXT: # %bb.11: ; CHECK-NEXT: mv a6, t0 ; CHECK-NEXT: .LBB16_12: -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v0, v3, 2 -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v16, (a5) ; CHECK-NEXT: addi a5, a6, -16 ; CHECK-NEXT: mv t0, a2 @@ -359,9 +359,9 @@ ; CHECK-NEXT: # %bb.17: ; CHECK-NEXT: li a4, 32 ; CHECK-NEXT: .LBB16_18: -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v0, v2, 2 -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v24, (t0) ; CHECK-NEXT: addi t0, a4, -16 ; CHECK-NEXT: addi a6, a1, 256 @@ -386,14 +386,14 @@ ; CHECK-NEXT: # %bb.23: ; CHECK-NEXT: li a7, 32 ; CHECK-NEXT: .LBB16_24: -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: addi a1, a7, -16 ; CHECK-NEXT: vslidedown.vi v0, v1, 2 ; CHECK-NEXT: bltu a7, a1, .LBB16_26 ; CHECK-NEXT: # %bb.25: ; CHECK-NEXT: mv a2, a1 ; CHECK-NEXT: .LBB16_26: -; CHECK-NEXT: vsetvli zero, a5, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a5, e32, m8, tu, ma ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: li a4, 48 ; CHECK-NEXT: mul a1, a1, a4 @@ -458,7 +458,7 @@ ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload ; CHECK-NEXT: vnsrl.wi v16, v8, 0, v0.t -; CHECK-NEXT: vsetvli zero, a5, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a5, e32, m8, tu, ma ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: add a1, sp, a1 @@ -508,7 +508,7 @@ ; CHECK-NEXT: addi a1, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: addi a2, a0, -16 ; CHECK-NEXT: vslidedown.vi v0, v0, 2 ; CHECK-NEXT: bltu a0, a2, .LBB17_2 @@ -528,7 +528,7 @@ ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vnsrl.wi v16, v24, 0, v0.t ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vslideup.vi v16, v8, 16 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: csrr a0, vlenb diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll @@ -7,7 +7,7 @@ define void @abs_v16i8(<16 x i8>* %x) { ; CHECK-LABEL: abs_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; 
CHECK-NEXT: vrsub.vi v9, v8, 0 ; CHECK-NEXT: vmax.vv v8, v8, v9 @@ -23,7 +23,7 @@ define void @abs_v8i16(<8 x i16>* %x) { ; CHECK-LABEL: abs_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vrsub.vi v9, v8, 0 ; CHECK-NEXT: vmax.vv v8, v8, v9 @@ -39,7 +39,7 @@ define void @abs_v4i32(<4 x i32>* %x) { ; CHECK-LABEL: abs_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vrsub.vi v9, v8, 0 ; CHECK-NEXT: vmax.vv v8, v8, v9 @@ -55,7 +55,7 @@ define void @abs_v2i64(<2 x i64>* %x) { ; CHECK-LABEL: abs_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vrsub.vi v9, v8, 0 ; CHECK-NEXT: vmax.vv v8, v8, v9 @@ -72,7 +72,7 @@ ; LMULMAX2-LABEL: abs_v32i8: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: li a1, 32 -; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vrsub.vi v10, v8, 0 ; LMULMAX2-NEXT: vmax.vv v8, v8, v10 @@ -81,7 +81,7 @@ ; ; LMULMAX1-RV32-LABEL: abs_v32i8: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 ; LMULMAX1-RV32-NEXT: vle8.v v8, (a1) ; LMULMAX1-RV32-NEXT: vle8.v v9, (a0) @@ -95,7 +95,7 @@ ; ; LMULMAX1-RV64-LABEL: abs_v32i8: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV64-NEXT: addi a1, a0, 16 ; LMULMAX1-RV64-NEXT: vle8.v v8, (a1) ; LMULMAX1-RV64-NEXT: vle8.v v9, (a0) @@ -116,7 +116,7 @@ define void @abs_v16i16(<16 x i16>* %x) { ; LMULMAX2-LABEL: abs_v16i16: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-NEXT: vle16.v v8, (a0) ; LMULMAX2-NEXT: vrsub.vi v10, v8, 0 ; LMULMAX2-NEXT: vmax.vv v8, v8, v10 @@ -125,7 +125,7 @@ ; ; LMULMAX1-RV32-LABEL: abs_v16i16: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 ; LMULMAX1-RV32-NEXT: vle16.v v8, (a1) ; LMULMAX1-RV32-NEXT: vle16.v v9, (a0) @@ -139,7 +139,7 @@ ; ; LMULMAX1-RV64-LABEL: abs_v16i16: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV64-NEXT: addi a1, a0, 16 ; LMULMAX1-RV64-NEXT: vle16.v v8, (a1) ; LMULMAX1-RV64-NEXT: vle16.v v9, (a0) @@ -160,7 +160,7 @@ define void @abs_v8i32(<8 x i32>* %x) { ; LMULMAX2-LABEL: abs_v8i32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v8, (a0) ; LMULMAX2-NEXT: vrsub.vi v10, v8, 0 ; LMULMAX2-NEXT: vmax.vv v8, v8, v10 @@ -169,7 +169,7 @@ ; ; LMULMAX1-RV32-LABEL: abs_v8i32: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 ; LMULMAX1-RV32-NEXT: vle32.v v8, (a1) ; LMULMAX1-RV32-NEXT: vle32.v v9, (a0) @@ -183,7 +183,7 @@ ; ; LMULMAX1-RV64-LABEL: abs_v8i32: ; LMULMAX1-RV64: # %bb.0: -; 
LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV64-NEXT: addi a1, a0, 16 ; LMULMAX1-RV64-NEXT: vle32.v v8, (a1) ; LMULMAX1-RV64-NEXT: vle32.v v9, (a0) @@ -204,7 +204,7 @@ define void @abs_v4i64(<4 x i64>* %x) { ; LMULMAX2-LABEL: abs_v4i64: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-NEXT: vle64.v v8, (a0) ; LMULMAX2-NEXT: vrsub.vi v10, v8, 0 ; LMULMAX2-NEXT: vmax.vv v8, v8, v10 @@ -213,7 +213,7 @@ ; ; LMULMAX1-RV32-LABEL: abs_v4i64: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 ; LMULMAX1-RV32-NEXT: vle64.v v8, (a1) ; LMULMAX1-RV32-NEXT: vle64.v v9, (a0) @@ -227,7 +227,7 @@ ; ; LMULMAX1-RV64-LABEL: abs_v4i64: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: addi a1, a0, 16 ; LMULMAX1-RV64-NEXT: vle64.v v8, (a1) ; LMULMAX1-RV64-NEXT: vle64.v v9, (a0) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast-large-vector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast-large-vector.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast-large-vector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast-large-vector.ll @@ -8,7 +8,7 @@ ; VLEN256: # %bb.0: ; VLEN256-NEXT: addi a1, a0, 256 ; VLEN256-NEXT: li a2, 256 -; VLEN256-NEXT: vsetvli zero, a2, e8, m8, ta, mu +; VLEN256-NEXT: vsetvli zero, a2, e8, m8, ta, ma ; VLEN256-NEXT: vle8.v v24, (a0) ; VLEN256-NEXT: vle8.v v0, (a1) ; VLEN256-NEXT: vadd.vv v8, v24, v8 @@ -18,14 +18,14 @@ ; VLEN512-LABEL: bitcast_1024B: ; VLEN512: # %bb.0: ; VLEN512-NEXT: li a0, 512 -; VLEN512-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; VLEN512-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; VLEN512-NEXT: vadd.vv v8, v16, v8 ; VLEN512-NEXT: ret ; ; VLEN1024-LABEL: bitcast_1024B: ; VLEN1024: # %bb.0: ; VLEN1024-NEXT: li a0, 512 -; VLEN1024-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; VLEN1024-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; VLEN1024-NEXT: vadd.vv v8, v12, v8 ; VLEN1024-NEXT: ret %c = bitcast <256 x i16> %a to <512 x i8> diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll @@ -16,14 +16,14 @@ ; CHECK-LABEL: bitcast_v4i8_v32i1: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret ; ; ELEN32-LABEL: bitcast_v4i8_v32i1: ; ELEN32: # %bb.0: ; ELEN32-NEXT: li a0, 32 -; ELEN32-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; ELEN32-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; ELEN32-NEXT: vmxor.mm v0, v0, v8 ; ELEN32-NEXT: ret %c = bitcast <4 x i8> %a to <32 x i1> @@ -34,13 +34,13 @@ define i8 @bitcast_v1i8_i8(<1 x i8> %a) { ; CHECK-LABEL: bitcast_v1i8_i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, mf8, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret ; ; ELEN32-LABEL: bitcast_v1i8_i8: ; ELEN32: # %bb.0: -; ELEN32-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; ELEN32-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; ELEN32-NEXT: vmv.x.s a0, v8 ; ELEN32-NEXT: ret %b = bitcast <1 x i8> %a to i8 @@ 
-50,13 +50,13 @@ define i16 @bitcast_v2i8_i16(<2 x i8> %a) { ; CHECK-LABEL: bitcast_v2i8_i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret ; ; ELEN32-LABEL: bitcast_v2i8_i16: ; ELEN32: # %bb.0: -; ELEN32-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; ELEN32-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; ELEN32-NEXT: vmv.x.s a0, v8 ; ELEN32-NEXT: ret %b = bitcast <2 x i8> %a to i16 @@ -66,13 +66,13 @@ define i16 @bitcast_v1i16_i16(<1 x i16> %a) { ; CHECK-LABEL: bitcast_v1i16_i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret ; ; ELEN32-LABEL: bitcast_v1i16_i16: ; ELEN32: # %bb.0: -; ELEN32-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; ELEN32-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; ELEN32-NEXT: vmv.x.s a0, v8 ; ELEN32-NEXT: ret %b = bitcast <1 x i16> %a to i16 @@ -82,13 +82,13 @@ define i32 @bitcast_v4i8_i32(<4 x i8> %a) { ; CHECK-LABEL: bitcast_v4i8_i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret ; ; ELEN32-LABEL: bitcast_v4i8_i32: ; ELEN32: # %bb.0: -; ELEN32-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; ELEN32-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; ELEN32-NEXT: vmv.x.s a0, v8 ; ELEN32-NEXT: ret %b = bitcast <4 x i8> %a to i32 @@ -98,13 +98,13 @@ define i32 @bitcast_v2i16_i32(<2 x i16> %a) { ; CHECK-LABEL: bitcast_v2i16_i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret ; ; ELEN32-LABEL: bitcast_v2i16_i32: ; ELEN32: # %bb.0: -; ELEN32-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; ELEN32-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; ELEN32-NEXT: vmv.x.s a0, v8 ; ELEN32-NEXT: ret %b = bitcast <2 x i16> %a to i32 @@ -114,13 +114,13 @@ define i32 @bitcast_v1i32_i32(<1 x i32> %a) { ; CHECK-LABEL: bitcast_v1i32_i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret ; ; ELEN32-LABEL: bitcast_v1i32_i32: ; ELEN32: # %bb.0: -; ELEN32-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; ELEN32-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; ELEN32-NEXT: vmv.x.s a0, v8 ; ELEN32-NEXT: ret %b = bitcast <1 x i32> %a to i32 @@ -131,7 +131,7 @@ ; RV32-LABEL: bitcast_v8i8_i64: ; RV32: # %bb.0: ; RV32-NEXT: li a0, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 ; RV32-NEXT: vmv.x.s a0, v8 @@ -139,13 +139,13 @@ ; ; RV64-LABEL: bitcast_v8i8_i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret ; ; RV32ELEN32-LABEL: bitcast_v8i8_i64: ; RV32ELEN32: # %bb.0: -; RV32ELEN32-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32ELEN32-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32ELEN32-NEXT: vslidedown.vi v9, v8, 1 ; RV32ELEN32-NEXT: vmv.x.s a1, v9 ; RV32ELEN32-NEXT: vmv.x.s a0, v8 @@ -156,7 +156,7 @@ ; RV64ELEN32-NEXT: addi sp, sp, -16 ; RV64ELEN32-NEXT: .cfi_def_cfa_offset 16 ; RV64ELEN32-NEXT: addi a0, sp, 8 -; RV64ELEN32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV64ELEN32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64ELEN32-NEXT: vse8.v v8, (a0) 
; RV64ELEN32-NEXT: ld a0, 8(sp) ; RV64ELEN32-NEXT: addi sp, sp, 16 @@ -169,7 +169,7 @@ ; RV32-LABEL: bitcast_v4i16_i64: ; RV32: # %bb.0: ; RV32-NEXT: li a0, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 ; RV32-NEXT: vmv.x.s a0, v8 @@ -177,13 +177,13 @@ ; ; RV64-LABEL: bitcast_v4i16_i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret ; ; RV32ELEN32-LABEL: bitcast_v4i16_i64: ; RV32ELEN32: # %bb.0: -; RV32ELEN32-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32ELEN32-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32ELEN32-NEXT: vslidedown.vi v9, v8, 1 ; RV32ELEN32-NEXT: vmv.x.s a1, v9 ; RV32ELEN32-NEXT: vmv.x.s a0, v8 @@ -194,7 +194,7 @@ ; RV64ELEN32-NEXT: addi sp, sp, -16 ; RV64ELEN32-NEXT: .cfi_def_cfa_offset 16 ; RV64ELEN32-NEXT: addi a0, sp, 8 -; RV64ELEN32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV64ELEN32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV64ELEN32-NEXT: vse16.v v8, (a0) ; RV64ELEN32-NEXT: ld a0, 8(sp) ; RV64ELEN32-NEXT: addi sp, sp, 16 @@ -207,7 +207,7 @@ ; RV32-LABEL: bitcast_v2i32_i64: ; RV32: # %bb.0: ; RV32-NEXT: li a0, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 ; RV32-NEXT: vmv.x.s a0, v8 @@ -215,13 +215,13 @@ ; ; RV64-LABEL: bitcast_v2i32_i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret ; ; RV32ELEN32-LABEL: bitcast_v2i32_i64: ; RV32ELEN32: # %bb.0: -; RV32ELEN32-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32ELEN32-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32ELEN32-NEXT: vslidedown.vi v9, v8, 1 ; RV32ELEN32-NEXT: vmv.x.s a1, v9 ; RV32ELEN32-NEXT: vmv.x.s a0, v8 @@ -232,7 +232,7 @@ ; RV64ELEN32-NEXT: addi sp, sp, -16 ; RV64ELEN32-NEXT: .cfi_def_cfa_offset 16 ; RV64ELEN32-NEXT: addi a0, sp, 8 -; RV64ELEN32-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ELEN32-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ELEN32-NEXT: vse32.v v8, (a0) ; RV64ELEN32-NEXT: ld a0, 8(sp) ; RV64ELEN32-NEXT: addi sp, sp, 16 @@ -245,7 +245,7 @@ ; RV32-LABEL: bitcast_v1i64_i64: ; RV32: # %bb.0: ; RV32-NEXT: li a0, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 ; RV32-NEXT: vmv.x.s a0, v8 @@ -253,7 +253,7 @@ ; ; RV64-LABEL: bitcast_v1i64_i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret ; @@ -267,13 +267,13 @@ define half @bitcast_v2i8_f16(<2 x i8> %a) { ; CHECK-LABEL: bitcast_v2i8_f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret ; ; ELEN32-LABEL: bitcast_v2i8_f16: ; ELEN32: # %bb.0: -; ELEN32-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; ELEN32-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; ELEN32-NEXT: vfmv.f.s fa0, v8 ; ELEN32-NEXT: ret %b = bitcast <2 x i8> %a to half @@ -283,13 +283,13 @@ define half @bitcast_v1i16_f16(<1 x i16> %a) { ; CHECK-LABEL: bitcast_v1i16_f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, 
v8 ; CHECK-NEXT: ret ; ; ELEN32-LABEL: bitcast_v1i16_f16: ; ELEN32: # %bb.0: -; ELEN32-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; ELEN32-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; ELEN32-NEXT: vfmv.f.s fa0, v8 ; ELEN32-NEXT: ret %b = bitcast <1 x i16> %a to half @@ -299,13 +299,13 @@ define float @bitcast_v4i8_f32(<4 x i8> %a) { ; CHECK-LABEL: bitcast_v4i8_f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret ; ; ELEN32-LABEL: bitcast_v4i8_f32: ; ELEN32: # %bb.0: -; ELEN32-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; ELEN32-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; ELEN32-NEXT: vfmv.f.s fa0, v8 ; ELEN32-NEXT: ret %b = bitcast <4 x i8> %a to float @@ -315,13 +315,13 @@ define float @bitcast_v2i16_f32(<2 x i16> %a) { ; CHECK-LABEL: bitcast_v2i16_f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret ; ; ELEN32-LABEL: bitcast_v2i16_f32: ; ELEN32: # %bb.0: -; ELEN32-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; ELEN32-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; ELEN32-NEXT: vfmv.f.s fa0, v8 ; ELEN32-NEXT: ret %b = bitcast <2 x i16> %a to float @@ -331,13 +331,13 @@ define float @bitcast_v1i32_f32(<1 x i32> %a) { ; CHECK-LABEL: bitcast_v1i32_f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret ; ; ELEN32-LABEL: bitcast_v1i32_f32: ; ELEN32: # %bb.0: -; ELEN32-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; ELEN32-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; ELEN32-NEXT: vfmv.f.s fa0, v8 ; ELEN32-NEXT: ret %b = bitcast <1 x i32> %a to float @@ -347,7 +347,7 @@ define double @bitcast_v8i8_f64(<8 x i8> %a) { ; CHECK-LABEL: bitcast_v8i8_f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret ; @@ -356,7 +356,7 @@ ; ELEN32-NEXT: addi sp, sp, -16 ; ELEN32-NEXT: .cfi_def_cfa_offset 16 ; ELEN32-NEXT: addi a0, sp, 8 -; ELEN32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; ELEN32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; ELEN32-NEXT: vse8.v v8, (a0) ; ELEN32-NEXT: fld fa0, 8(sp) ; ELEN32-NEXT: addi sp, sp, 16 @@ -368,7 +368,7 @@ define double @bitcast_v4i16_f64(<4 x i16> %a) { ; CHECK-LABEL: bitcast_v4i16_f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret ; @@ -377,7 +377,7 @@ ; ELEN32-NEXT: addi sp, sp, -16 ; ELEN32-NEXT: .cfi_def_cfa_offset 16 ; ELEN32-NEXT: addi a0, sp, 8 -; ELEN32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; ELEN32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; ELEN32-NEXT: vse16.v v8, (a0) ; ELEN32-NEXT: fld fa0, 8(sp) ; ELEN32-NEXT: addi sp, sp, 16 @@ -389,7 +389,7 @@ define double @bitcast_v2i32_f64(<2 x i32> %a) { ; CHECK-LABEL: bitcast_v2i32_f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret ; @@ -398,7 +398,7 @@ ; ELEN32-NEXT: addi sp, sp, -16 ; ELEN32-NEXT: .cfi_def_cfa_offset 16 ; ELEN32-NEXT: addi a0, sp, 8 -; ELEN32-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; ELEN32-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; ELEN32-NEXT: vse32.v v8, (a0) ; ELEN32-NEXT: fld fa0, 8(sp) ; ELEN32-NEXT: addi sp, sp, 16 @@ -410,7 
+410,7 @@ define double @bitcast_v1i64_f64(<1 x i64> %a) { ; CHECK-LABEL: bitcast_v1i64_f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret ; @@ -435,13 +435,13 @@ define <1 x i16> @bitcast_i16_v1i16(i16 %a) { ; CHECK-LABEL: bitcast_i16_v1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret ; ; ELEN32-LABEL: bitcast_i16_v1i16: ; ELEN32: # %bb.0: -; ELEN32-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; ELEN32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; ELEN32-NEXT: vmv.v.x v8, a0 ; ELEN32-NEXT: ret %b = bitcast i16 %a to <1 x i16> @@ -451,25 +451,25 @@ define <2 x i16> @bitcast_i32_v2i16(i32 %a) { ; RV32-LABEL: bitcast_i32_v2i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV32-NEXT: vmv.s.x v8, a0 ; RV32-NEXT: ret ; ; RV64-LABEL: bitcast_i32_v2i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV64-NEXT: vmv.v.x v8, a0 ; RV64-NEXT: ret ; ; RV32ELEN32-LABEL: bitcast_i32_v2i16: ; RV32ELEN32: # %bb.0: -; RV32ELEN32-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32ELEN32-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32ELEN32-NEXT: vmv.s.x v8, a0 ; RV32ELEN32-NEXT: ret ; ; RV64ELEN32-LABEL: bitcast_i32_v2i16: ; RV64ELEN32: # %bb.0: -; RV64ELEN32-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ELEN32-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ELEN32-NEXT: vmv.v.x v8, a0 ; RV64ELEN32-NEXT: ret %b = bitcast i32 %a to <2 x i16> @@ -479,25 +479,25 @@ define <1 x i32> @bitcast_i32_v1i32(i32 %a) { ; RV32-LABEL: bitcast_i32_v1i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV32-NEXT: vmv.s.x v8, a0 ; RV32-NEXT: ret ; ; RV64-LABEL: bitcast_i32_v1i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV64-NEXT: vmv.v.x v8, a0 ; RV64-NEXT: ret ; ; RV32ELEN32-LABEL: bitcast_i32_v1i32: ; RV32ELEN32: # %bb.0: -; RV32ELEN32-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32ELEN32-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32ELEN32-NEXT: vmv.s.x v8, a0 ; RV32ELEN32-NEXT: ret ; ; RV64ELEN32-LABEL: bitcast_i32_v1i32: ; RV64ELEN32: # %bb.0: -; RV64ELEN32-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ELEN32-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ELEN32-NEXT: vmv.v.x v8, a0 ; RV64ELEN32-NEXT: ret %b = bitcast i32 %a to <1 x i32> @@ -507,25 +507,25 @@ define <4 x i16> @bitcast_i64_v4i16(i64 %a) { ; RV32-LABEL: bitcast_i64_v4i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV32-NEXT: vmv.v.i v8, 0 ; RV32-NEXT: vslide1up.vx v9, v8, a1 ; RV32-NEXT: vslide1up.vx v10, v9, a0 -; RV32-NEXT: vsetivli zero, 1, e64, m1, tu, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, tu, ma ; RV32-NEXT: vslideup.vi v8, v10, 0 ; RV32-NEXT: ret ; ; RV64-LABEL: bitcast_i64_v4i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v8, a0 ; RV64-NEXT: ret ; ; RV32ELEN32-LABEL: bitcast_i64_v4i16: ; RV32ELEN32: # %bb.0: -; RV32ELEN32-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV32ELEN32-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV32ELEN32-NEXT: vmv.v.x v8, a1 -; RV32ELEN32-NEXT: vsetvli 
zero, zero, e32, m1, tu, mu +; RV32ELEN32-NEXT: vsetvli zero, zero, e32, m1, tu, ma ; RV32ELEN32-NEXT: vmv.s.x v8, a0 ; RV32ELEN32-NEXT: ret ; @@ -535,7 +535,7 @@ ; RV64ELEN32-NEXT: .cfi_def_cfa_offset 16 ; RV64ELEN32-NEXT: sd a0, 8(sp) ; RV64ELEN32-NEXT: addi a0, sp, 8 -; RV64ELEN32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV64ELEN32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV64ELEN32-NEXT: vle16.v v8, (a0) ; RV64ELEN32-NEXT: addi sp, sp, 16 ; RV64ELEN32-NEXT: ret @@ -546,25 +546,25 @@ define <2 x i32> @bitcast_i64_v2i32(i64 %a) { ; RV32-LABEL: bitcast_i64_v2i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV32-NEXT: vmv.v.i v8, 0 ; RV32-NEXT: vslide1up.vx v9, v8, a1 ; RV32-NEXT: vslide1up.vx v10, v9, a0 -; RV32-NEXT: vsetivli zero, 1, e64, m1, tu, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, tu, ma ; RV32-NEXT: vslideup.vi v8, v10, 0 ; RV32-NEXT: ret ; ; RV64-LABEL: bitcast_i64_v2i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v8, a0 ; RV64-NEXT: ret ; ; RV32ELEN32-LABEL: bitcast_i64_v2i32: ; RV32ELEN32: # %bb.0: -; RV32ELEN32-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV32ELEN32-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV32ELEN32-NEXT: vmv.v.x v8, a1 -; RV32ELEN32-NEXT: vsetvli zero, zero, e32, m1, tu, mu +; RV32ELEN32-NEXT: vsetvli zero, zero, e32, m1, tu, ma ; RV32ELEN32-NEXT: vmv.s.x v8, a0 ; RV32ELEN32-NEXT: ret ; @@ -574,7 +574,7 @@ ; RV64ELEN32-NEXT: .cfi_def_cfa_offset 16 ; RV64ELEN32-NEXT: sd a0, 8(sp) ; RV64ELEN32-NEXT: addi a0, sp, 8 -; RV64ELEN32-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ELEN32-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ELEN32-NEXT: vle32.v v8, (a0) ; RV64ELEN32-NEXT: addi sp, sp, 16 ; RV64ELEN32-NEXT: ret @@ -585,17 +585,17 @@ define <1 x i64> @bitcast_i64_v1i64(i64 %a) { ; RV32-LABEL: bitcast_i64_v1i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV32-NEXT: vmv.v.i v8, 0 ; RV32-NEXT: vslide1up.vx v9, v8, a1 ; RV32-NEXT: vslide1up.vx v10, v9, a0 -; RV32-NEXT: vsetivli zero, 1, e64, m1, tu, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, tu, ma ; RV32-NEXT: vslideup.vi v8, v10, 0 ; RV32-NEXT: ret ; ; RV64-LABEL: bitcast_i64_v1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v8, a0 ; RV64-NEXT: ret ; diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll @@ -7,7 +7,7 @@ define void @bitreverse_v8i16(<8 x i16>* %x, <8 x i16>* %y) { ; RV32-LABEL: bitreverse_v8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV32-NEXT: vle16.v v8, (a0) ; RV32-NEXT: vsrl.vi v9, v8, 8 ; RV32-NEXT: vsll.vi v8, v8, 8 @@ -38,7 +38,7 @@ ; ; RV64-LABEL: bitreverse_v8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64-NEXT: vle16.v v8, (a0) ; RV64-NEXT: vsrl.vi v9, v8, 8 ; RV64-NEXT: vsll.vi v8, v8, 8 @@ -77,7 +77,7 @@ define void @bitreverse_v4i32(<4 x i32>* %x, <4 x i32>* %y) { ; RV32-LABEL: bitreverse_v4i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; 
RV32-NEXT: vle32.v v8, (a0) ; RV32-NEXT: vsrl.vi v9, v8, 8 ; RV32-NEXT: lui a1, 16 @@ -117,7 +117,7 @@ ; ; RV64-LABEL: bitreverse_v4i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64-NEXT: vle32.v v8, (a0) ; RV64-NEXT: vsrl.vi v9, v8, 8 ; RV64-NEXT: lui a1, 16 @@ -165,7 +165,7 @@ define void @bitreverse_v2i64(<2 x i64>* %x, <2 x i64>* %y) { ; RV32-LABEL: bitreverse_v2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: li a1, 56 ; RV32-NEXT: vsrl.vx v9, v8, a1 @@ -180,34 +180,34 @@ ; RV32-NEXT: vand.vx v10, v10, a4 ; RV32-NEXT: li a5, 5 ; RV32-NEXT: vmv.s.x v0, a5 -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vmv.v.i v11, 0 ; RV32-NEXT: lui a5, 1044480 ; RV32-NEXT: vmerge.vxm v11, v11, a5, v0 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vsrl.vi v12, v8, 8 ; RV32-NEXT: vand.vv v11, v12, v11 ; RV32-NEXT: vor.vv v10, v11, v10 ; RV32-NEXT: vor.vv v9, v10, v9 ; RV32-NEXT: li a5, 255 -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vmv.v.x v10, a5 ; RV32-NEXT: vmerge.vim v10, v10, 0, v0 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vsll.vi v11, v8, 8 ; RV32-NEXT: vand.vv v10, v11, v10 -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vmv.v.x v11, a3 ; RV32-NEXT: vmerge.vim v11, v11, 0, v0 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vsll.vi v12, v8, 24 ; RV32-NEXT: vand.vv v11, v12, v11 ; RV32-NEXT: vor.vv v10, v11, v10 ; RV32-NEXT: vsll.vx v11, v8, a2 -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vmv.v.x v12, a4 ; RV32-NEXT: vmerge.vim v12, v12, 0, v0 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vand.vv v11, v11, v12 ; RV32-NEXT: vsll.vx v8, v8, a1 ; RV32-NEXT: vor.vv v8, v8, v11 @@ -216,9 +216,9 @@ ; RV32-NEXT: vsrl.vi v9, v8, 4 ; RV32-NEXT: lui a1, 61681 ; RV32-NEXT: addi a1, a1, -241 -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vmv.v.x v10, a1 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vand.vv v9, v9, v10 ; RV32-NEXT: vand.vv v8, v8, v10 ; RV32-NEXT: vsll.vi v8, v8, 4 @@ -226,9 +226,9 @@ ; RV32-NEXT: vsrl.vi v9, v8, 2 ; RV32-NEXT: lui a1, 209715 ; RV32-NEXT: addi a1, a1, 819 -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vmv.v.x v10, a1 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vand.vv v9, v9, v10 ; RV32-NEXT: vand.vv v8, v8, v10 ; RV32-NEXT: vsll.vi v8, v8, 2 @@ -236,9 +236,9 @@ ; RV32-NEXT: vsrl.vi v9, v8, 1 ; RV32-NEXT: lui a1, 349525 ; RV32-NEXT: addi a1, a1, 1365 -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vmv.v.x v10, a1 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vand.vv v9, v9, v10 ; RV32-NEXT: vand.vv v8, v8, v10 ; RV32-NEXT: 
vadd.vv v8, v8, v8 @@ -248,7 +248,7 @@ ; ; RV64-LABEL: bitreverse_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: li a1, 56 ; RV64-NEXT: vsrl.vx v9, v8, a1 @@ -315,7 +315,7 @@ define void @bitreverse_v16i16(<16 x i16>* %x, <16 x i16>* %y) { ; LMULMAX2-RV32-LABEL: bitreverse_v16i16: ; LMULMAX2-RV32: # %bb.0: -; LMULMAX2-RV32-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-RV32-NEXT: vle16.v v8, (a0) ; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 8 ; LMULMAX2-RV32-NEXT: vsll.vi v8, v8, 8 @@ -346,7 +346,7 @@ ; ; LMULMAX2-RV64-LABEL: bitreverse_v16i16: ; LMULMAX2-RV64: # %bb.0: -; LMULMAX2-RV64-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-RV64-NEXT: vle16.v v8, (a0) ; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 8 ; LMULMAX2-RV64-NEXT: vsll.vi v8, v8, 8 @@ -377,7 +377,7 @@ ; ; LMULMAX1-RV32-LABEL: bitreverse_v16i16: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 ; LMULMAX1-RV32-NEXT: vle16.v v8, (a1) ; LMULMAX1-RV32-NEXT: vle16.v v9, (a0) @@ -429,7 +429,7 @@ ; ; LMULMAX1-RV64-LABEL: bitreverse_v16i16: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV64-NEXT: addi a1, a0, 16 ; LMULMAX1-RV64-NEXT: vle16.v v8, (a1) ; LMULMAX1-RV64-NEXT: vle16.v v9, (a0) @@ -489,7 +489,7 @@ define void @bitreverse_v8i32(<8 x i32>* %x, <8 x i32>* %y) { ; LMULMAX2-RV32-LABEL: bitreverse_v8i32: ; LMULMAX2-RV32: # %bb.0: -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 8 ; LMULMAX2-RV32-NEXT: lui a1, 16 @@ -529,7 +529,7 @@ ; ; LMULMAX2-RV64-LABEL: bitreverse_v8i32: ; LMULMAX2-RV64: # %bb.0: -; LMULMAX2-RV64-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV64-NEXT: vle32.v v8, (a0) ; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 8 ; LMULMAX2-RV64-NEXT: lui a1, 16 @@ -569,7 +569,7 @@ ; ; LMULMAX1-RV32-LABEL: bitreverse_v8i32: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 ; LMULMAX1-RV32-NEXT: vle32.v v8, (a1) ; LMULMAX1-RV32-NEXT: vle32.v v9, (a0) @@ -636,7 +636,7 @@ ; ; LMULMAX1-RV64-LABEL: bitreverse_v8i32: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV64-NEXT: addi a1, a0, 16 ; LMULMAX1-RV64-NEXT: vle32.v v8, (a1) ; LMULMAX1-RV64-NEXT: vle32.v v9, (a0) @@ -711,7 +711,7 @@ define void @bitreverse_v4i64(<4 x i64>* %x, <4 x i64>* %y) { ; LMULMAX2-RV32-LABEL: bitreverse_v4i64: ; LMULMAX2-RV32: # %bb.0: -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX2-RV32-NEXT: li a1, 56 ; LMULMAX2-RV32-NEXT: vsrl.vx v10, v8, a1 @@ -726,34 +726,34 @@ ; LMULMAX2-RV32-NEXT: vand.vx v12, v12, a4 ; LMULMAX2-RV32-NEXT: li a5, 85 ; LMULMAX2-RV32-NEXT: vmv.s.x v0, a5 -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli 
zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.i v14, 0 ; LMULMAX2-RV32-NEXT: lui a5, 1044480 ; LMULMAX2-RV32-NEXT: vmerge.vxm v14, v14, a5, v0 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vsrl.vi v16, v8, 8 ; LMULMAX2-RV32-NEXT: vand.vv v14, v16, v14 ; LMULMAX2-RV32-NEXT: vor.vv v12, v14, v12 ; LMULMAX2-RV32-NEXT: vor.vv v10, v12, v10 ; LMULMAX2-RV32-NEXT: li a5, 255 -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.x v12, a5 ; LMULMAX2-RV32-NEXT: vmerge.vim v12, v12, 0, v0 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vsll.vi v14, v8, 8 ; LMULMAX2-RV32-NEXT: vand.vv v12, v14, v12 -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.x v14, a3 ; LMULMAX2-RV32-NEXT: vmerge.vim v14, v14, 0, v0 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vsll.vi v16, v8, 24 ; LMULMAX2-RV32-NEXT: vand.vv v14, v16, v14 ; LMULMAX2-RV32-NEXT: vor.vv v12, v14, v12 ; LMULMAX2-RV32-NEXT: vsll.vx v14, v8, a2 -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.x v16, a4 ; LMULMAX2-RV32-NEXT: vmerge.vim v16, v16, 0, v0 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vand.vv v14, v14, v16 ; LMULMAX2-RV32-NEXT: vsll.vx v8, v8, a1 ; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v14 @@ -762,9 +762,9 @@ ; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 4 ; LMULMAX2-RV32-NEXT: lui a1, 61681 ; LMULMAX2-RV32-NEXT: addi a1, a1, -241 -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.x v12, a1 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vand.vv v10, v10, v12 ; LMULMAX2-RV32-NEXT: vand.vv v8, v8, v12 ; LMULMAX2-RV32-NEXT: vsll.vi v8, v8, 4 @@ -772,9 +772,9 @@ ; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 2 ; LMULMAX2-RV32-NEXT: lui a1, 209715 ; LMULMAX2-RV32-NEXT: addi a1, a1, 819 -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.x v12, a1 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vand.vv v10, v10, v12 ; LMULMAX2-RV32-NEXT: vand.vv v8, v8, v12 ; LMULMAX2-RV32-NEXT: vsll.vi v8, v8, 2 @@ -782,9 +782,9 @@ ; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX2-RV32-NEXT: lui a1, 349525 ; LMULMAX2-RV32-NEXT: addi a1, a1, 1365 -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.x v12, a1 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vand.vv v10, v10, v12 ; LMULMAX2-RV32-NEXT: vand.vv v8, v8, v12 ; LMULMAX2-RV32-NEXT: vadd.vv v8, v8, v8 @@ -794,7 +794,7 @@ ; ; LMULMAX2-RV64-LABEL: bitreverse_v4i64: ; LMULMAX2-RV64: # %bb.0: -; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV64-NEXT: 
vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX2-RV64-NEXT: li a1, 56 ; LMULMAX2-RV64-NEXT: vsrl.vx v10, v8, a1 @@ -853,7 +853,7 @@ ; ; LMULMAX1-RV32-LABEL: bitreverse_v4i64: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 ; LMULMAX1-RV32-NEXT: vle64.v v12, (a1) ; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) @@ -870,34 +870,34 @@ ; LMULMAX1-RV32-NEXT: vand.vx v11, v9, a5 ; LMULMAX1-RV32-NEXT: li a6, 5 ; LMULMAX1-RV32-NEXT: vmv.s.x v0, a6 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.i v9, 0 ; LMULMAX1-RV32-NEXT: lui a6, 1044480 ; LMULMAX1-RV32-NEXT: vmerge.vxm v9, v9, a6, v0 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vsrl.vi v13, v12, 8 ; LMULMAX1-RV32-NEXT: vand.vv v13, v13, v9 ; LMULMAX1-RV32-NEXT: vor.vv v11, v13, v11 ; LMULMAX1-RV32-NEXT: vor.vv v13, v11, v10 ; LMULMAX1-RV32-NEXT: li a6, 255 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v10, a6 ; LMULMAX1-RV32-NEXT: vmerge.vim v10, v10, 0, v0 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vsll.vi v11, v12, 8 ; LMULMAX1-RV32-NEXT: vand.vv v14, v11, v10 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v11, a4 ; LMULMAX1-RV32-NEXT: vmerge.vim v11, v11, 0, v0 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vsll.vi v15, v12, 24 ; LMULMAX1-RV32-NEXT: vand.vv v15, v15, v11 ; LMULMAX1-RV32-NEXT: vor.vv v14, v15, v14 ; LMULMAX1-RV32-NEXT: vsll.vx v15, v12, a3 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v16, a5 ; LMULMAX1-RV32-NEXT: vmerge.vim v16, v16, 0, v0 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vand.vv v15, v15, v16 ; LMULMAX1-RV32-NEXT: vsll.vx v12, v12, a2 ; LMULMAX1-RV32-NEXT: vor.vv v12, v12, v15 @@ -906,9 +906,9 @@ ; LMULMAX1-RV32-NEXT: vsrl.vi v13, v12, 4 ; LMULMAX1-RV32-NEXT: lui a6, 61681 ; LMULMAX1-RV32-NEXT: addi a6, a6, -241 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v14, a6 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vand.vv v13, v13, v14 ; LMULMAX1-RV32-NEXT: vand.vv v12, v12, v14 ; LMULMAX1-RV32-NEXT: vsll.vi v12, v12, 4 @@ -916,9 +916,9 @@ ; LMULMAX1-RV32-NEXT: vsrl.vi v13, v12, 2 ; LMULMAX1-RV32-NEXT: lui a6, 209715 ; LMULMAX1-RV32-NEXT: addi a6, a6, 819 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v15, a6 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vand.vv v13, v13, v15 ; LMULMAX1-RV32-NEXT: vand.vv v12, v12, v15 ; LMULMAX1-RV32-NEXT: vsll.vi v12, v12, 2 
@@ -926,9 +926,9 @@ ; LMULMAX1-RV32-NEXT: vsrl.vi v13, v12, 1 ; LMULMAX1-RV32-NEXT: lui a6, 349525 ; LMULMAX1-RV32-NEXT: addi a6, a6, 1365 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v17, a6 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vand.vv v13, v13, v17 ; LMULMAX1-RV32-NEXT: vand.vv v12, v12, v17 ; LMULMAX1-RV32-NEXT: vadd.vv v12, v12, v12 @@ -975,7 +975,7 @@ ; ; LMULMAX1-RV64-LABEL: bitreverse_v4i64: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: addi a1, a0, 16 ; LMULMAX1-RV64-NEXT: vle64.v v9, (a1) ; LMULMAX1-RV64-NEXT: vle64.v v8, (a0) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll @@ -7,7 +7,7 @@ define void @bswap_v8i16(<8 x i16>* %x, <8 x i16>* %y) { ; CHECK-LABEL: bswap_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsrl.vi v9, v8, 8 ; CHECK-NEXT: vsll.vi v8, v8, 8 @@ -25,7 +25,7 @@ define void @bswap_v4i32(<4 x i32>* %x, <4 x i32>* %y) { ; RV32-LABEL: bswap_v4i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vle32.v v8, (a0) ; RV32-NEXT: vsrl.vi v9, v8, 8 ; RV32-NEXT: lui a1, 16 @@ -44,7 +44,7 @@ ; ; RV64-LABEL: bswap_v4i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64-NEXT: vle32.v v8, (a0) ; RV64-NEXT: vsrl.vi v9, v8, 8 ; RV64-NEXT: lui a1, 16 @@ -71,7 +71,7 @@ define void @bswap_v2i64(<2 x i64>* %x, <2 x i64>* %y) { ; RV32-LABEL: bswap_v2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: li a1, 56 ; RV32-NEXT: vsrl.vx v9, v8, a1 @@ -86,34 +86,34 @@ ; RV32-NEXT: vand.vx v10, v10, a4 ; RV32-NEXT: li a5, 5 ; RV32-NEXT: vmv.s.x v0, a5 -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vmv.v.i v11, 0 ; RV32-NEXT: lui a5, 1044480 ; RV32-NEXT: vmerge.vxm v11, v11, a5, v0 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vsrl.vi v12, v8, 8 ; RV32-NEXT: vand.vv v11, v12, v11 ; RV32-NEXT: vor.vv v10, v11, v10 ; RV32-NEXT: vor.vv v9, v10, v9 ; RV32-NEXT: li a5, 255 -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vmv.v.x v10, a5 ; RV32-NEXT: vmerge.vim v10, v10, 0, v0 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vsll.vi v11, v8, 8 ; RV32-NEXT: vand.vv v10, v11, v10 -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vmv.v.x v11, a3 ; RV32-NEXT: vmerge.vim v11, v11, 0, v0 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vsll.vi v12, v8, 24 ; RV32-NEXT: vand.vv v11, v12, v11 ; RV32-NEXT: vor.vv v10, v11, v10 ; RV32-NEXT: vsll.vx v11, v8, a2 -; RV32-NEXT: vsetivli zero, 4, 
e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vmv.v.x v12, a4 ; RV32-NEXT: vmerge.vim v12, v12, 0, v0 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vand.vv v11, v11, v12 ; RV32-NEXT: vsll.vx v8, v8, a1 ; RV32-NEXT: vor.vv v8, v8, v11 @@ -124,7 +124,7 @@ ; ; RV64-LABEL: bswap_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: li a1, 56 ; RV64-NEXT: vsrl.vx v9, v8, a1 @@ -170,7 +170,7 @@ define void @bswap_v16i16(<16 x i16>* %x, <16 x i16>* %y) { ; LMULMAX2-RV32-LABEL: bswap_v16i16: ; LMULMAX2-RV32: # %bb.0: -; LMULMAX2-RV32-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-RV32-NEXT: vle16.v v8, (a0) ; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 8 ; LMULMAX2-RV32-NEXT: vsll.vi v8, v8, 8 @@ -180,7 +180,7 @@ ; ; LMULMAX2-RV64-LABEL: bswap_v16i16: ; LMULMAX2-RV64: # %bb.0: -; LMULMAX2-RV64-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-RV64-NEXT: vle16.v v8, (a0) ; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 8 ; LMULMAX2-RV64-NEXT: vsll.vi v8, v8, 8 @@ -190,7 +190,7 @@ ; ; LMULMAX1-RV32-LABEL: bswap_v16i16: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 ; LMULMAX1-RV32-NEXT: vle16.v v8, (a1) ; LMULMAX1-RV32-NEXT: vle16.v v9, (a0) @@ -206,7 +206,7 @@ ; ; LMULMAX1-RV64-LABEL: bswap_v16i16: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV64-NEXT: addi a1, a0, 16 ; LMULMAX1-RV64-NEXT: vle16.v v8, (a1) ; LMULMAX1-RV64-NEXT: vle16.v v9, (a0) @@ -230,7 +230,7 @@ define void @bswap_v8i32(<8 x i32>* %x, <8 x i32>* %y) { ; LMULMAX2-RV32-LABEL: bswap_v8i32: ; LMULMAX2-RV32: # %bb.0: -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 8 ; LMULMAX2-RV32-NEXT: lui a1, 16 @@ -249,7 +249,7 @@ ; ; LMULMAX2-RV64-LABEL: bswap_v8i32: ; LMULMAX2-RV64: # %bb.0: -; LMULMAX2-RV64-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV64-NEXT: vle32.v v8, (a0) ; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 8 ; LMULMAX2-RV64-NEXT: lui a1, 16 @@ -268,7 +268,7 @@ ; ; LMULMAX1-RV32-LABEL: bswap_v8i32: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 ; LMULMAX1-RV32-NEXT: vle32.v v8, (a1) ; LMULMAX1-RV32-NEXT: vle32.v v9, (a0) @@ -299,7 +299,7 @@ ; ; LMULMAX1-RV64-LABEL: bswap_v8i32: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV64-NEXT: addi a1, a0, 16 ; LMULMAX1-RV64-NEXT: vle32.v v8, (a1) ; LMULMAX1-RV64-NEXT: vle32.v v9, (a0) @@ -338,7 +338,7 @@ define void @bswap_v4i64(<4 x i64>* %x, <4 x i64>* %y) { ; LMULMAX2-RV32-LABEL: bswap_v4i64: ; LMULMAX2-RV32: # %bb.0: -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX2-RV32-NEXT: li a1, 56 ; 
LMULMAX2-RV32-NEXT: vsrl.vx v10, v8, a1 @@ -353,34 +353,34 @@ ; LMULMAX2-RV32-NEXT: vand.vx v12, v12, a4 ; LMULMAX2-RV32-NEXT: li a5, 85 ; LMULMAX2-RV32-NEXT: vmv.s.x v0, a5 -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.i v14, 0 ; LMULMAX2-RV32-NEXT: lui a5, 1044480 ; LMULMAX2-RV32-NEXT: vmerge.vxm v14, v14, a5, v0 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vsrl.vi v16, v8, 8 ; LMULMAX2-RV32-NEXT: vand.vv v14, v16, v14 ; LMULMAX2-RV32-NEXT: vor.vv v12, v14, v12 ; LMULMAX2-RV32-NEXT: vor.vv v10, v12, v10 ; LMULMAX2-RV32-NEXT: li a5, 255 -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.x v12, a5 ; LMULMAX2-RV32-NEXT: vmerge.vim v12, v12, 0, v0 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vsll.vi v14, v8, 8 ; LMULMAX2-RV32-NEXT: vand.vv v12, v14, v12 -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.x v14, a3 ; LMULMAX2-RV32-NEXT: vmerge.vim v14, v14, 0, v0 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vsll.vi v16, v8, 24 ; LMULMAX2-RV32-NEXT: vand.vv v14, v16, v14 ; LMULMAX2-RV32-NEXT: vor.vv v12, v14, v12 ; LMULMAX2-RV32-NEXT: vsll.vx v14, v8, a2 -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.x v16, a4 ; LMULMAX2-RV32-NEXT: vmerge.vim v16, v16, 0, v0 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vand.vv v14, v14, v16 ; LMULMAX2-RV32-NEXT: vsll.vx v8, v8, a1 ; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v14 @@ -391,7 +391,7 @@ ; ; LMULMAX2-RV64-LABEL: bswap_v4i64: ; LMULMAX2-RV64: # %bb.0: -; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX2-RV64-NEXT: li a1, 56 ; LMULMAX2-RV64-NEXT: vsrl.vx v10, v8, a1 @@ -429,7 +429,7 @@ ; ; LMULMAX1-RV32-LABEL: bswap_v4i64: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 ; LMULMAX1-RV32-NEXT: vle64.v v9, (a1) ; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) @@ -446,34 +446,34 @@ ; LMULMAX1-RV32-NEXT: vand.vx v11, v11, a5 ; LMULMAX1-RV32-NEXT: li a6, 5 ; LMULMAX1-RV32-NEXT: vmv.s.x v0, a6 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.i v12, 0 ; LMULMAX1-RV32-NEXT: lui a6, 1044480 ; LMULMAX1-RV32-NEXT: vmerge.vxm v12, v12, a6, v0 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vsrl.vi v13, v9, 8 ; LMULMAX1-RV32-NEXT: vand.vv v13, v13, v12 ; LMULMAX1-RV32-NEXT: vor.vv v11, v13, v11 ; LMULMAX1-RV32-NEXT: vor.vv v10, v11, v10 ; LMULMAX1-RV32-NEXT: li a6, 255 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v11, a6 ; LMULMAX1-RV32-NEXT: 
vmerge.vim v11, v11, 0, v0 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vsll.vi v13, v9, 8 ; LMULMAX1-RV32-NEXT: vand.vv v13, v13, v11 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v14, a4 ; LMULMAX1-RV32-NEXT: vmerge.vim v14, v14, 0, v0 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vsll.vi v15, v9, 24 ; LMULMAX1-RV32-NEXT: vand.vv v15, v15, v14 ; LMULMAX1-RV32-NEXT: vor.vv v13, v15, v13 ; LMULMAX1-RV32-NEXT: vsll.vx v15, v9, a3 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v16, a5 ; LMULMAX1-RV32-NEXT: vmerge.vim v16, v16, 0, v0 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vand.vv v15, v15, v16 ; LMULMAX1-RV32-NEXT: vsll.vx v9, v9, a2 ; LMULMAX1-RV32-NEXT: vor.vv v9, v9, v15 @@ -506,7 +506,7 @@ ; ; LMULMAX1-RV64-LABEL: bswap_v4i64: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: addi a1, a0, 16 ; LMULMAX1-RV64-NEXT: vle64.v v8, (a1) ; LMULMAX1-RV64-NEXT: vle64.v v9, (a0) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll @@ -5,7 +5,7 @@ define fastcc <4 x i8> @ret_v4i8(<4 x i8>* %p) { ; CHECK-LABEL: ret_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: ret %v = load <4 x i8>, <4 x i8>* %p @@ -15,7 +15,7 @@ define fastcc <4 x i32> @ret_v4i32(<4 x i32>* %p) { ; CHECK-LABEL: ret_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: ret %v = load <4 x i32>, <4 x i32>* %p @@ -25,7 +25,7 @@ define fastcc <8 x i32> @ret_v8i32(<8 x i32>* %p) { ; CHECK-LABEL: ret_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: ret %v = load <8 x i32>, <8 x i32>* %p @@ -35,13 +35,13 @@ define fastcc <16 x i64> @ret_v16i64(<16 x i64>* %p) { ; LMULMAX8-LABEL: ret_v16i64: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; LMULMAX8-NEXT: vle64.v v8, (a0) ; LMULMAX8-NEXT: ret ; ; LMULMAX4-LABEL: ret_v16i64: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; LMULMAX4-NEXT: vle64.v v8, (a0) ; LMULMAX4-NEXT: addi a0, a0, 64 ; LMULMAX4-NEXT: vle64.v v12, (a0) @@ -53,7 +53,7 @@ define fastcc <8 x i1> @ret_mask_v8i1(<8 x i1>* %p) { ; CHECK-LABEL: ret_mask_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: ret %v = load <8 x i1>, <8 x i1>* %p @@ -64,7 +64,7 @@ ; CHECK-LABEL: ret_mask_v32i1: ; CHECK: # %bb.0: ; 
CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: ret %v = load <32 x i1>, <32 x i1>* %p @@ -76,7 +76,7 @@ ; LMULMAX8-LABEL: ret_split_v64i32: ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: li a1, 32 -; LMULMAX8-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; LMULMAX8-NEXT: vle32.v v8, (a0) ; LMULMAX8-NEXT: addi a0, a0, 128 ; LMULMAX8-NEXT: vle32.v v16, (a0) @@ -84,7 +84,7 @@ ; ; LMULMAX4-LABEL: ret_split_v64i32: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; LMULMAX4-NEXT: vle32.v v8, (a0) ; LMULMAX4-NEXT: addi a1, a0, 64 ; LMULMAX4-NEXT: vle32.v v12, (a1) @@ -103,7 +103,7 @@ ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: addi a2, a1, 128 ; LMULMAX8-NEXT: li a3, 32 -; LMULMAX8-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; LMULMAX8-NEXT: vle32.v v8, (a2) ; LMULMAX8-NEXT: addi a2, a1, 256 ; LMULMAX8-NEXT: vle32.v v16, (a1) @@ -122,7 +122,7 @@ ; LMULMAX4-LABEL: ret_split_v128i32: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: addi a2, a1, 64 -; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; LMULMAX4-NEXT: vle32.v v8, (a2) ; LMULMAX4-NEXT: addi a2, a1, 128 ; LMULMAX4-NEXT: vle32.v v12, (a2) @@ -160,7 +160,7 @@ define fastcc <4 x i8> @ret_v8i8_param_v4i8(<4 x i8> %v) { ; CHECK-LABEL: ret_v8i8_param_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 2 ; CHECK-NEXT: ret %r = add <4 x i8> %v, @@ -170,7 +170,7 @@ define fastcc <4 x i8> @ret_v4i8_param_v4i8_v4i8(<4 x i8> %v, <4 x i8> %w) { ; CHECK-LABEL: ret_v4i8_param_v4i8_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: ret %r = add <4 x i8> %v, %w @@ -180,7 +180,7 @@ define fastcc <4 x i64> @ret_v4i64_param_v4i64_v4i64(<4 x i64> %v, <4 x i64> %w) { ; CHECK-LABEL: ret_v4i64_param_v4i64_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v10 ; CHECK-NEXT: ret %r = add <4 x i64> %v, %w @@ -190,7 +190,7 @@ define fastcc <8 x i1> @ret_v8i1_param_v8i1_v8i1(<8 x i1> %v, <8 x i1> %w) { ; CHECK-LABEL: ret_v8i1_param_v8i1_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %r = xor <8 x i1> %v, %w @@ -201,7 +201,7 @@ ; CHECK-LABEL: ret_v32i1_param_v32i1_v32i1: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmand.mm v0, v0, v8 ; CHECK-NEXT: ret %r = and <32 x i1> %v, %w @@ -212,7 +212,7 @@ ; LMULMAX8-LABEL: ret_v32i32_param_v32i32_v32i32_v32i32_i32: ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: li a2, 32 -; LMULMAX8-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; LMULMAX8-NEXT: vle32.v v24, (a0) ; LMULMAX8-NEXT: vadd.vv v8, v8, v16 ; LMULMAX8-NEXT: vadd.vv v8, v8, v24 @@ -221,7 +221,7 @@ ; ; LMULMAX4-LABEL: ret_v32i32_param_v32i32_v32i32_v32i32_i32: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, 
ta, ma ; LMULMAX4-NEXT: addi a1, a0, 64 ; LMULMAX4-NEXT: vle32.v v24, (a1) ; LMULMAX4-NEXT: vle32.v v28, (a0) @@ -293,7 +293,7 @@ ; LMULMAX8-NEXT: .cfi_def_cfa s0, 0 ; LMULMAX8-NEXT: andi sp, sp, -128 ; LMULMAX8-NEXT: li a2, 32 -; LMULMAX8-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; LMULMAX8-NEXT: vle32.v v24, (a0) ; LMULMAX8-NEXT: mv a3, sp ; LMULMAX8-NEXT: mv a0, sp @@ -318,7 +318,7 @@ ; LMULMAX4-NEXT: addi s0, sp, 256 ; LMULMAX4-NEXT: .cfi_def_cfa s0, 0 ; LMULMAX4-NEXT: andi sp, sp, -128 -; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; LMULMAX4-NEXT: vle32.v v24, (a0) ; LMULMAX4-NEXT: addi a0, a0, 64 ; LMULMAX4-NEXT: vle32.v v28, (a0) @@ -347,7 +347,7 @@ ; LMULMAX8-LABEL: vector_arg_indirect_stack: ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: li a0, 32 -; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; LMULMAX8-NEXT: vle32.v v16, (t2) ; LMULMAX8-NEXT: vadd.vv v8, v8, v16 ; LMULMAX8-NEXT: ret @@ -355,7 +355,7 @@ ; LMULMAX4-LABEL: vector_arg_indirect_stack: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: addi a0, t2, 64 -; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; LMULMAX4-NEXT: vle32.v v16, (t2) ; LMULMAX4-NEXT: vle32.v v20, (a0) ; LMULMAX4-NEXT: vadd.vv v8, v8, v16 @@ -379,7 +379,7 @@ ; LMULMAX8-NEXT: .cfi_def_cfa s0, 0 ; LMULMAX8-NEXT: andi sp, sp, -128 ; LMULMAX8-NEXT: li a0, 32 -; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; LMULMAX8-NEXT: vmv.v.i v8, 0 ; LMULMAX8-NEXT: mv a0, sp ; LMULMAX8-NEXT: li a1, 1 @@ -413,7 +413,7 @@ ; LMULMAX4-NEXT: .cfi_def_cfa s0, 0 ; LMULMAX4-NEXT: andi sp, sp, -128 ; LMULMAX4-NEXT: addi a0, sp, 64 -; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; LMULMAX4-NEXT: vmv.v.i v8, 0 ; LMULMAX4-NEXT: vse32.v v8, (a0) ; LMULMAX4-NEXT: mv a0, sp @@ -447,7 +447,7 @@ ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: li a0, 32 ; LMULMAX8-NEXT: addi a1, sp, 8 -; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; LMULMAX8-NEXT: vle32.v v24, (a1) ; LMULMAX8-NEXT: vadd.vv v8, v8, v16 ; LMULMAX8-NEXT: vadd.vv v8, v8, v24 @@ -455,7 +455,7 @@ ; ; LMULMAX4-LABEL: vector_arg_direct_stack: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; LMULMAX4-NEXT: addi a0, sp, 8 ; LMULMAX4-NEXT: vle32.v v24, (a0) ; LMULMAX4-NEXT: addi a0, sp, 72 @@ -479,7 +479,7 @@ ; LMULMAX8-NEXT: sd ra, 152(sp) # 8-byte Folded Spill ; LMULMAX8-NEXT: .cfi_offset ra, -8 ; LMULMAX8-NEXT: li a0, 32 -; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; LMULMAX8-NEXT: vmv.v.i v8, 0 ; LMULMAX8-NEXT: addi a0, sp, 8 ; LMULMAX8-NEXT: vse32.v v8, (a0) @@ -517,7 +517,7 @@ ; LMULMAX4-NEXT: li a0, 13 ; LMULMAX4-NEXT: sd a0, 0(sp) ; LMULMAX4-NEXT: addi a0, sp, 72 -; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; LMULMAX4-NEXT: vmv.v.i v8, 0 ; LMULMAX4-NEXT: vse32.v v8, (a0) ; LMULMAX4-NEXT: addi a0, sp, 8 @@ -552,7 +552,7 @@ ; CHECK-LABEL: vector_mask_arg_direct_stack: ; CHECK: # %bb.0: ; CHECK-NEXT: addi a0, sp, 136 -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vlm.v v8, (a0) ; CHECK-NEXT: vmxor.mm v0, v0, 
v8 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll @@ -7,7 +7,7 @@ define <4 x i8> @ret_v4i8(<4 x i8>* %p) { ; CHECK-LABEL: ret_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: ret %v = load <4 x i8>, <4 x i8>* %p @@ -17,7 +17,7 @@ define <4 x i32> @ret_v4i32(<4 x i32>* %p) { ; CHECK-LABEL: ret_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: ret %v = load <4 x i32>, <4 x i32>* %p @@ -27,25 +27,25 @@ define <8 x i32> @ret_v8i32(<8 x i32>* %p) { ; LMULMAX8-LABEL: ret_v8i32: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX8-NEXT: vle32.v v8, (a0) ; LMULMAX8-NEXT: ret ; ; LMULMAX4-LABEL: ret_v8i32: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX4-NEXT: vle32.v v8, (a0) ; LMULMAX4-NEXT: ret ; ; LMULMAX2-LABEL: ret_v8i32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v8, (a0) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: ret_v8i32: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vle32.v v8, (a0) ; LMULMAX1-NEXT: addi a0, a0, 16 ; LMULMAX1-NEXT: vle32.v v9, (a0) @@ -57,13 +57,13 @@ define <16 x i64> @ret_v16i64(<16 x i64>* %p) { ; LMULMAX8-LABEL: ret_v16i64: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; LMULMAX8-NEXT: vle64.v v8, (a0) ; LMULMAX8-NEXT: ret ; ; LMULMAX4-LABEL: ret_v16i64: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; LMULMAX4-NEXT: vle64.v v8, (a0) ; LMULMAX4-NEXT: addi a0, a0, 64 ; LMULMAX4-NEXT: vle64.v v12, (a0) @@ -71,7 +71,7 @@ ; ; LMULMAX2-LABEL: ret_v16i64: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-NEXT: vle64.v v8, (a0) ; LMULMAX2-NEXT: addi a1, a0, 32 ; LMULMAX2-NEXT: vle64.v v10, (a1) @@ -83,7 +83,7 @@ ; ; LMULMAX1-LABEL: ret_v16i64: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vle64.v v8, (a0) ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vle64.v v9, (a1) @@ -107,7 +107,7 @@ define <8 x i1> @ret_mask_v8i1(<8 x i1>* %p) { ; CHECK-LABEL: ret_mask_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: ret %v = load <8 x i1>, <8 x i1>* %p @@ -118,27 +118,27 @@ ; LMULMAX8-LABEL: ret_mask_v32i1: ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: li a1, 32 -; LMULMAX8-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; LMULMAX8-NEXT: vlm.v v0, (a0) ; LMULMAX8-NEXT: ret ; ; LMULMAX4-LABEL: ret_mask_v32i1: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: li a1, 32 -; LMULMAX4-NEXT: 
vsetvli zero, a1, e8, m2, ta, mu +; LMULMAX4-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; LMULMAX4-NEXT: vlm.v v0, (a0) ; LMULMAX4-NEXT: ret ; ; LMULMAX2-LABEL: ret_mask_v32i1: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: li a1, 32 -; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; LMULMAX2-NEXT: vlm.v v0, (a0) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: ret_mask_v32i1: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: vlm.v v0, (a0) ; LMULMAX1-NEXT: addi a0, a0, 2 ; LMULMAX1-NEXT: vlm.v v8, (a0) @@ -152,7 +152,7 @@ ; LMULMAX8-LABEL: ret_split_v64i32: ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: li a1, 32 -; LMULMAX8-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; LMULMAX8-NEXT: vle32.v v8, (a0) ; LMULMAX8-NEXT: addi a0, a0, 128 ; LMULMAX8-NEXT: vle32.v v16, (a0) @@ -160,7 +160,7 @@ ; ; LMULMAX4-LABEL: ret_split_v64i32: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; LMULMAX4-NEXT: vle32.v v8, (a0) ; LMULMAX4-NEXT: addi a1, a0, 64 ; LMULMAX4-NEXT: vle32.v v12, (a1) @@ -172,7 +172,7 @@ ; ; LMULMAX2-LABEL: ret_split_v64i32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v8, (a0) ; LMULMAX2-NEXT: addi a1, a0, 32 ; LMULMAX2-NEXT: vle32.v v10, (a1) @@ -192,7 +192,7 @@ ; ; LMULMAX1-LABEL: ret_split_v64i32: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vle32.v v8, (a0) ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vle32.v v9, (a1) @@ -235,7 +235,7 @@ ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: addi a2, a1, 128 ; LMULMAX8-NEXT: li a3, 32 -; LMULMAX8-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; LMULMAX8-NEXT: vle32.v v8, (a2) ; LMULMAX8-NEXT: addi a2, a1, 256 ; LMULMAX8-NEXT: vle32.v v16, (a1) @@ -254,7 +254,7 @@ ; LMULMAX4-LABEL: ret_split_v128i32: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: addi a2, a1, 64 -; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; LMULMAX4-NEXT: vle32.v v8, (a2) ; LMULMAX4-NEXT: addi a2, a1, 128 ; LMULMAX4-NEXT: vle32.v v12, (a2) @@ -289,7 +289,7 @@ ; LMULMAX2-LABEL: ret_split_v128i32: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: addi a2, a1, 32 -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v8, (a2) ; LMULMAX2-NEXT: addi a2, a1, 64 ; LMULMAX2-NEXT: vle32.v v10, (a2) @@ -356,7 +356,7 @@ ; LMULMAX1-LABEL: ret_split_v128i32: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: addi a2, a1, 16 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vle32.v v8, (a2) ; LMULMAX1-NEXT: addi a2, a1, 32 ; LMULMAX1-NEXT: vle32.v v9, (a2) @@ -490,7 +490,7 @@ define <4 x i8> @ret_v8i8_param_v4i8(<4 x i8> %v) { ; CHECK-LABEL: ret_v8i8_param_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 2 ; CHECK-NEXT: ret %r = add <4 x i8> %v, @@ -500,7 +500,7 @@ define <4 x i8> @ret_v4i8_param_v4i8_v4i8(<4 x i8> %v, <4 x i8> %w) { ; CHECK-LABEL: ret_v4i8_param_v4i8_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, 
mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: ret %r = add <4 x i8> %v, %w @@ -510,25 +510,25 @@ define <4 x i64> @ret_v4i64_param_v4i64_v4i64(<4 x i64> %v, <4 x i64> %w) { ; LMULMAX8-LABEL: ret_v4i64_param_v4i64_v4i64: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX8-NEXT: vadd.vv v8, v8, v10 ; LMULMAX8-NEXT: ret ; ; LMULMAX4-LABEL: ret_v4i64_param_v4i64_v4i64: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX4-NEXT: vadd.vv v8, v8, v10 ; LMULMAX4-NEXT: ret ; ; LMULMAX2-LABEL: ret_v4i64_param_v4i64_v4i64: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-NEXT: vadd.vv v8, v8, v10 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: ret_v4i64_param_v4i64_v4i64: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vadd.vv v8, v8, v10 ; LMULMAX1-NEXT: vadd.vv v9, v9, v11 ; LMULMAX1-NEXT: ret @@ -539,7 +539,7 @@ define <8 x i1> @ret_v8i1_param_v8i1_v8i1(<8 x i1> %v, <8 x i1> %w) { ; CHECK-LABEL: ret_v8i1_param_v8i1_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %r = xor <8 x i1> %v, %w @@ -550,27 +550,27 @@ ; LMULMAX8-LABEL: ret_v32i1_param_v32i1_v32i1: ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: li a0, 32 -; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; LMULMAX8-NEXT: vmand.mm v0, v0, v8 ; LMULMAX8-NEXT: ret ; ; LMULMAX4-LABEL: ret_v32i1_param_v32i1_v32i1: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: li a0, 32 -; LMULMAX4-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; LMULMAX4-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; LMULMAX4-NEXT: vmand.mm v0, v0, v8 ; LMULMAX4-NEXT: ret ; ; LMULMAX2-LABEL: ret_v32i1_param_v32i1_v32i1: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: li a0, 32 -; LMULMAX2-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; LMULMAX2-NEXT: vmand.mm v0, v0, v8 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: ret_v32i1_param_v32i1_v32i1: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: vmand.mm v0, v0, v9 ; LMULMAX1-NEXT: vmand.mm v8, v8, v10 ; LMULMAX1-NEXT: ret @@ -582,7 +582,7 @@ ; LMULMAX8-LABEL: ret_v32i32_param_v32i32_v32i32_v32i32_i32: ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: li a2, 32 -; LMULMAX8-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; LMULMAX8-NEXT: vle32.v v24, (a0) ; LMULMAX8-NEXT: vadd.vv v8, v8, v16 ; LMULMAX8-NEXT: vadd.vv v8, v8, v24 @@ -591,7 +591,7 @@ ; ; LMULMAX4-LABEL: ret_v32i32_param_v32i32_v32i32_v32i32_i32: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; LMULMAX4-NEXT: addi a1, a0, 64 ; LMULMAX4-NEXT: vle32.v v24, (a1) ; LMULMAX4-NEXT: vle32.v v28, (a0) @@ -605,7 +605,7 @@ ; ; LMULMAX2-LABEL: ret_v32i32_param_v32i32_v32i32_v32i32_i32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v24, (a0) ; LMULMAX2-NEXT: addi a1, a0, 32 ; LMULMAX2-NEXT: vle32.v v26, (a1) @@ -629,7 
+629,7 @@ ; ; LMULMAX1-LABEL: ret_v32i32_param_v32i32_v32i32_v32i32_i32: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vle32.v v24, (a0) ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vle32.v v25, (a1) @@ -792,7 +792,7 @@ ; LMULMAX8-NEXT: .cfi_def_cfa s0, 0 ; LMULMAX8-NEXT: andi sp, sp, -128 ; LMULMAX8-NEXT: li a2, 32 -; LMULMAX8-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; LMULMAX8-NEXT: vle32.v v24, (a0) ; LMULMAX8-NEXT: mv a3, sp ; LMULMAX8-NEXT: mv a0, sp @@ -817,7 +817,7 @@ ; LMULMAX4-NEXT: addi s0, sp, 256 ; LMULMAX4-NEXT: .cfi_def_cfa s0, 0 ; LMULMAX4-NEXT: andi sp, sp, -128 -; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; LMULMAX4-NEXT: vle32.v v24, (a0) ; LMULMAX4-NEXT: addi a0, a0, 64 ; LMULMAX4-NEXT: vle32.v v28, (a0) @@ -847,7 +847,7 @@ ; LMULMAX2-NEXT: addi s0, sp, 256 ; LMULMAX2-NEXT: .cfi_def_cfa s0, 0 ; LMULMAX2-NEXT: andi sp, sp, -128 -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v24, (a0) ; LMULMAX2-NEXT: addi a1, a0, 32 ; LMULMAX2-NEXT: vle32.v v26, (a1) @@ -890,7 +890,7 @@ ; LMULMAX1-NEXT: .cfi_def_cfa s0, 0 ; LMULMAX1-NEXT: andi sp, sp, -128 ; LMULMAX1-NEXT: mv s1, sp -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vle32.v v24, (a0) ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vle32.v v25, (a1) @@ -965,7 +965,7 @@ ; LMULMAX8-LABEL: split_vector_args: ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: li a1, 32 -; LMULMAX8-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; LMULMAX8-NEXT: vle32.v v8, (a0) ; LMULMAX8-NEXT: vadd.vv v8, v16, v8 ; LMULMAX8-NEXT: ret @@ -973,7 +973,7 @@ ; LMULMAX4-LABEL: split_vector_args: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: addi a1, a0, 64 -; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; LMULMAX4-NEXT: vle32.v v8, (a0) ; LMULMAX4-NEXT: vle32.v v12, (a1) ; LMULMAX4-NEXT: vadd.vv v8, v16, v8 @@ -983,7 +983,7 @@ ; LMULMAX2-LABEL: split_vector_args: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: addi a1, a0, 64 -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v10, (a0) ; LMULMAX2-NEXT: addi a0, a0, 32 ; LMULMAX2-NEXT: vle32.v v12, (a0) @@ -997,7 +997,7 @@ ; LMULMAX1-LABEL: split_vector_args: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: addi a1, a0, 64 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vle32.v v24, (a1) ; LMULMAX1-NEXT: addi a1, a0, 48 ; LMULMAX1-NEXT: vle32.v v25, (a1) @@ -1031,10 +1031,10 @@ ; LMULMAX8-NEXT: addi s0, sp, 256 ; LMULMAX8-NEXT: .cfi_def_cfa s0, 0 ; LMULMAX8-NEXT: andi sp, sp, -128 -; LMULMAX8-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX8-NEXT: vle32.v v8, (a0) ; LMULMAX8-NEXT: li a0, 32 -; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; LMULMAX8-NEXT: vle32.v v16, (a1) ; LMULMAX8-NEXT: mv a1, sp ; LMULMAX8-NEXT: mv a0, sp @@ -1061,9 +1061,9 @@ ; LMULMAX4-NEXT: addi s0, sp, 256 ; LMULMAX4-NEXT: .cfi_def_cfa s0, 0 ; LMULMAX4-NEXT: andi sp, sp, -128 -; LMULMAX4-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX4-NEXT: 
vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX4-NEXT: vle32.v v8, (a0) -; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; LMULMAX4-NEXT: vle32.v v16, (a1) ; LMULMAX4-NEXT: addi a0, a1, 64 ; LMULMAX4-NEXT: vle32.v v20, (a0) @@ -1094,9 +1094,9 @@ ; LMULMAX2-NEXT: addi s0, sp, 128 ; LMULMAX2-NEXT: .cfi_def_cfa s0, 0 ; LMULMAX2-NEXT: andi sp, sp, -128 -; LMULMAX2-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX2-NEXT: vle32.v v8, (a0) -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v14, (a1) ; LMULMAX2-NEXT: addi a0, a1, 32 ; LMULMAX2-NEXT: vle32.v v16, (a0) @@ -1134,9 +1134,9 @@ ; LMULMAX1-NEXT: addi s0, sp, 128 ; LMULMAX1-NEXT: .cfi_def_cfa s0, 0 ; LMULMAX1-NEXT: andi sp, sp, -128 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vle32.v v8, (a0) -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vle32.v v13, (a1) ; LMULMAX1-NEXT: addi a0, a1, 32 ; LMULMAX1-NEXT: vle32.v v15, (a0) @@ -1189,14 +1189,14 @@ ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: li a0, 32 ; LMULMAX8-NEXT: mv a1, sp -; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; LMULMAX8-NEXT: vle32.v v16, (a1) ; LMULMAX8-NEXT: vadd.vv v8, v8, v16 ; LMULMAX8-NEXT: ret ; ; LMULMAX4-LABEL: vector_arg_via_stack: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; LMULMAX4-NEXT: mv a0, sp ; LMULMAX4-NEXT: vle32.v v16, (a0) ; LMULMAX4-NEXT: addi a0, sp, 64 @@ -1207,7 +1207,7 @@ ; ; LMULMAX2-LABEL: vector_arg_via_stack: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: addi a0, sp, 64 ; LMULMAX2-NEXT: vle32.v v16, (a0) ; LMULMAX2-NEXT: mv a0, sp @@ -1225,7 +1225,7 @@ ; LMULMAX1-LABEL: vector_arg_via_stack: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: addi a0, sp, 112 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vle32.v v16, (a0) ; LMULMAX1-NEXT: addi a0, sp, 96 ; LMULMAX1-NEXT: vle32.v v17, (a0) @@ -1263,7 +1263,7 @@ ; LMULMAX8-NEXT: sd ra, 136(sp) # 8-byte Folded Spill ; LMULMAX8-NEXT: .cfi_offset ra, -8 ; LMULMAX8-NEXT: li a0, 32 -; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; LMULMAX8-NEXT: vmv.v.i v8, 0 ; LMULMAX8-NEXT: vse32.v v8, (sp) ; LMULMAX8-NEXT: li a0, 8 @@ -1290,7 +1290,7 @@ ; LMULMAX4-NEXT: .cfi_offset ra, -8 ; LMULMAX4-NEXT: li a0, 8 ; LMULMAX4-NEXT: sd a0, 128(sp) -; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; LMULMAX4-NEXT: vmv.v.i v8, 0 ; LMULMAX4-NEXT: vse32.v v8, (sp) ; LMULMAX4-NEXT: addi a0, sp, 64 @@ -1319,7 +1319,7 @@ ; LMULMAX2-NEXT: .cfi_offset ra, -8 ; LMULMAX2-NEXT: li a0, 8 ; LMULMAX2-NEXT: sd a0, 128(sp) -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vmv.v.i v8, 0 ; LMULMAX2-NEXT: vse32.v v8, (sp) ; LMULMAX2-NEXT: addi a0, sp, 96 @@ -1356,7 +1356,7 @@ ; LMULMAX1-NEXT: .cfi_offset ra, -8 ; LMULMAX1-NEXT: li a0, 8 ; LMULMAX1-NEXT: sd a0, 128(sp) -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: 
vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vmv.v.i v8, 0 ; LMULMAX1-NEXT: vse32.v v8, (sp) ; LMULMAX1-NEXT: addi a0, sp, 112 @@ -1410,7 +1410,7 @@ ; CHECK-LABEL: vector_mask_arg_via_stack: ; CHECK: # %bb.0: ; CHECK-NEXT: addi a0, sp, 136 -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: ret ret <4 x i1> %10 @@ -1426,19 +1426,19 @@ ; LMULMAX8-NEXT: sd ra, 152(sp) # 8-byte Folded Spill ; LMULMAX8-NEXT: .cfi_offset ra, -8 ; LMULMAX8-NEXT: li a0, 32 -; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; LMULMAX8-NEXT: vmv.v.i v8, 0 ; LMULMAX8-NEXT: vse32.v v8, (sp) ; LMULMAX8-NEXT: li a0, 8 ; LMULMAX8-NEXT: sd a0, 128(sp) -; LMULMAX8-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; LMULMAX8-NEXT: vmv.v.i v16, 0 ; LMULMAX8-NEXT: vmerge.vim v16, v16, 1, v0 -; LMULMAX8-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX8-NEXT: vmv.v.i v17, 0 -; LMULMAX8-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; LMULMAX8-NEXT: vsetivli zero, 4, e8, mf2, tu, ma ; LMULMAX8-NEXT: vslideup.vi v17, v16, 0 -; LMULMAX8-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX8-NEXT: vmsne.vi v16, v17, 0 ; LMULMAX8-NEXT: addi a0, sp, 136 ; LMULMAX8-NEXT: li a5, 5 @@ -1464,19 +1464,19 @@ ; LMULMAX4-NEXT: .cfi_offset ra, -8 ; LMULMAX4-NEXT: li a0, 8 ; LMULMAX4-NEXT: sd a0, 128(sp) -; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; LMULMAX4-NEXT: vmv.v.i v8, 0 ; LMULMAX4-NEXT: vse32.v v8, (sp) ; LMULMAX4-NEXT: addi a0, sp, 64 ; LMULMAX4-NEXT: vse32.v v8, (a0) -; LMULMAX4-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; LMULMAX4-NEXT: vmv.v.i v12, 0 ; LMULMAX4-NEXT: vmerge.vim v12, v12, 1, v0 -; LMULMAX4-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX4-NEXT: vmv.v.i v13, 0 -; LMULMAX4-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; LMULMAX4-NEXT: vsetivli zero, 4, e8, mf2, tu, ma ; LMULMAX4-NEXT: vslideup.vi v13, v12, 0 -; LMULMAX4-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX4-NEXT: vmsne.vi v12, v13, 0 ; LMULMAX4-NEXT: addi a0, sp, 136 ; LMULMAX4-NEXT: li a5, 5 @@ -1504,7 +1504,7 @@ ; LMULMAX2-NEXT: .cfi_offset ra, -8 ; LMULMAX2-NEXT: li a0, 8 ; LMULMAX2-NEXT: sd a0, 128(sp) -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vmv.v.i v8, 0 ; LMULMAX2-NEXT: vse32.v v8, (sp) ; LMULMAX2-NEXT: addi a0, sp, 96 @@ -1513,14 +1513,14 @@ ; LMULMAX2-NEXT: vse32.v v8, (a0) ; LMULMAX2-NEXT: addi a0, sp, 32 ; LMULMAX2-NEXT: vse32.v v8, (a0) -; LMULMAX2-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; LMULMAX2-NEXT: vmv.v.i v10, 0 ; LMULMAX2-NEXT: vmerge.vim v10, v10, 1, v0 -; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX2-NEXT: vmv.v.i v11, 0 -; LMULMAX2-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e8, mf2, tu, ma ; LMULMAX2-NEXT: vslideup.vi v11, v10, 0 -; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX2-NEXT: vmsne.vi v10, v11, 0 ; LMULMAX2-NEXT: addi a0, sp, 136 ; LMULMAX2-NEXT: 
li a5, 5 @@ -1552,7 +1552,7 @@ ; LMULMAX1-NEXT: .cfi_offset ra, -8 ; LMULMAX1-NEXT: li a0, 8 ; LMULMAX1-NEXT: sd a0, 128(sp) -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vmv.v.i v8, 0 ; LMULMAX1-NEXT: vse32.v v8, (sp) ; LMULMAX1-NEXT: addi a0, sp, 112 @@ -1569,14 +1569,14 @@ ; LMULMAX1-NEXT: vse32.v v8, (a0) ; LMULMAX1-NEXT: addi a0, sp, 16 ; LMULMAX1-NEXT: vse32.v v8, (a0) -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; LMULMAX1-NEXT: vmv.v.i v9, 0 ; LMULMAX1-NEXT: vmerge.vim v9, v9, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX1-NEXT: vmv.v.i v10, 0 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, ma ; LMULMAX1-NEXT: vslideup.vi v10, v9, 0 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX1-NEXT: vmsne.vi v9, v10, 0 ; LMULMAX1-NEXT: addi a0, sp, 136 ; LMULMAX1-NEXT: li a5, 5 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll @@ -29,7 +29,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI1_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI1_0)(a1) -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfabs.v v9, v8, v0.t @@ -71,7 +71,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI3_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI3_0)(a1) -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfabs.v v9, v8, v0.t @@ -113,7 +113,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI5_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI5_0)(a1) -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfabs.v v9, v8, v0.t @@ -157,7 +157,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI7_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI7_0)(a1) -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmset.m v10 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmv1r.v v0, v10 @@ -201,7 +201,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI9_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI9_0)(a1) -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfabs.v v9, v8, v0.t @@ -243,7 +243,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI11_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI11_0)(a1) -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfabs.v v9, v8, v0.t @@ -287,7 +287,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI13_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI13_0)(a1) -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmset.m v10 ; CHECK-NEXT: vsetvli zero, a0, e32, 
m2, ta, mu ; CHECK-NEXT: vmv1r.v v0, v10 @@ -333,7 +333,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI15_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI15_0)(a1) -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmset.m v12 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmv1r.v v0, v12 @@ -377,7 +377,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI17_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI17_0)(a1) -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfabs.v v9, v8, v0.t @@ -421,7 +421,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI19_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI19_0)(a1) -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmset.m v10 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmv1r.v v0, v10 @@ -467,7 +467,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI21_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI21_0)(a1) -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmset.m v12 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmv1r.v v0, v12 @@ -513,7 +513,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI23_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI23_0)(a1) -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmset.m v16 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v16 @@ -559,7 +559,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI25_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI25_0)(a1) -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmset.m v16 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v16 @@ -585,7 +585,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v1, v0 ; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: addi a2, a0, -16 ; CHECK-NEXT: vslidedown.vi v2, v0, 2 ; CHECK-NEXT: bltu a0, a2, .LBB26_2 @@ -645,7 +645,7 @@ ; CHECK-LABEL: vp_ceil_v32f64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: addi a2, a0, -16 ; CHECK-NEXT: vmset.m v1 ; CHECK-NEXT: bltu a0, a2, .LBB27_2 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll @@ -13,7 +13,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) nounwind { ; CHECK-LABEL: ctlz_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsrl.vi v9, v8, 1 ; CHECK-NEXT: vor.vv v8, v8, v9 @@ -39,13 +39,13 @@ ; ; LMULMAX8-LABEL: ctlz_v16i8: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; LMULMAX8-NEXT: vle8.v v8, (a0) ; LMULMAX8-NEXT: vzext.vf4 v12, v8 ; LMULMAX8-NEXT: vfcvt.f.xu.v v12, v12 -; LMULMAX8-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; LMULMAX8-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; LMULMAX8-NEXT: vnsrl.wi v10, v12, 23 -; LMULMAX8-NEXT: vsetvli zero, 
zero, e8, m1, ta, mu +; LMULMAX8-NEXT: vsetvli zero, zero, e8, m1, ta, ma ; LMULMAX8-NEXT: vnsrl.wi v9, v10, 0 ; LMULMAX8-NEXT: li a1, 134 ; LMULMAX8-NEXT: vmseq.vi v0, v8, 0 @@ -64,7 +64,7 @@ define void @ctlz_v8i16(<8 x i16>* %x, <8 x i16>* %y) nounwind { ; LMULMAX2-RV32I-LABEL: ctlz_v8i16: ; LMULMAX2-RV32I: # %bb.0: -; LMULMAX2-RV32I-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX2-RV32I-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX2-RV32I-NEXT: vle16.v v8, (a0) ; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9 @@ -99,7 +99,7 @@ ; ; LMULMAX2-RV64I-LABEL: ctlz_v8i16: ; LMULMAX2-RV64I: # %bb.0: -; LMULMAX2-RV64I-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX2-RV64I-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX2-RV64I-NEXT: vle16.v v8, (a0) ; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9 @@ -134,7 +134,7 @@ ; ; LMULMAX1-RV32-LABEL: ctlz_v8i16: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV32-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v9 @@ -169,7 +169,7 @@ ; ; LMULMAX1-RV64-LABEL: ctlz_v8i16: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV64-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v9 @@ -204,7 +204,7 @@ ; ; LMULMAX2-RV32D-LABEL: ctlz_v8i16: ; LMULMAX2-RV32D: # %bb.0: -; LMULMAX2-RV32D-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX2-RV32D-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX2-RV32D-NEXT: vle16.v v8, (a0) ; LMULMAX2-RV32D-NEXT: vfwcvt.f.xu.v v10, v8 ; LMULMAX2-RV32D-NEXT: vnsrl.wi v9, v10, 23 @@ -218,7 +218,7 @@ ; ; LMULMAX2-RV64D-LABEL: ctlz_v8i16: ; LMULMAX2-RV64D: # %bb.0: -; LMULMAX2-RV64D-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX2-RV64D-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX2-RV64D-NEXT: vle16.v v8, (a0) ; LMULMAX2-RV64D-NEXT: vfwcvt.f.xu.v v10, v8 ; LMULMAX2-RV64D-NEXT: vnsrl.wi v9, v10, 23 @@ -232,7 +232,7 @@ ; ; LMULMAX8-LABEL: ctlz_v8i16: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX8-NEXT: vle16.v v8, (a0) ; LMULMAX8-NEXT: vfwcvt.f.xu.v v10, v8 ; LMULMAX8-NEXT: vnsrl.wi v9, v10, 23 @@ -254,7 +254,7 @@ define void @ctlz_v4i32(<4 x i32>* %x, <4 x i32>* %y) nounwind { ; LMULMAX2-RV32I-LABEL: ctlz_v4i32: ; LMULMAX2-RV32I: # %bb.0: -; LMULMAX2-RV32I-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX2-RV32I-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX2-RV32I-NEXT: vle32.v v8, (a0) ; LMULMAX2-RV32I-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX2-RV32I-NEXT: vor.vv v8, v8, v9 @@ -292,7 +292,7 @@ ; ; LMULMAX2-RV64I-LABEL: ctlz_v4i32: ; LMULMAX2-RV64I: # %bb.0: -; LMULMAX2-RV64I-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX2-RV64I-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX2-RV64I-NEXT: vle32.v v8, (a0) ; LMULMAX2-RV64I-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX2-RV64I-NEXT: vor.vv v8, v8, v9 @@ -330,7 +330,7 @@ ; ; LMULMAX1-RV32-LABEL: ctlz_v4i32: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV32-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v9 @@ -368,7 +368,7 @@ ; ; LMULMAX1-RV64-LABEL: ctlz_v4i32: ; 
LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV64-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v9 @@ -406,7 +406,7 @@ ; ; LMULMAX2-RV32D-LABEL: ctlz_v4i32: ; LMULMAX2-RV32D: # %bb.0: -; LMULMAX2-RV32D-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX2-RV32D-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX2-RV32D-NEXT: vle32.v v8, (a0) ; LMULMAX2-RV32D-NEXT: vfwcvt.f.xu.v v10, v8 ; LMULMAX2-RV32D-NEXT: li a1, 52 @@ -421,7 +421,7 @@ ; ; LMULMAX2-RV64D-LABEL: ctlz_v4i32: ; LMULMAX2-RV64D: # %bb.0: -; LMULMAX2-RV64D-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX2-RV64D-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX2-RV64D-NEXT: vle32.v v8, (a0) ; LMULMAX2-RV64D-NEXT: vfwcvt.f.xu.v v10, v8 ; LMULMAX2-RV64D-NEXT: li a1, 52 @@ -436,7 +436,7 @@ ; ; LMULMAX8-LABEL: ctlz_v4i32: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX8-NEXT: vle32.v v8, (a0) ; LMULMAX8-NEXT: vfwcvt.f.xu.v v10, v8 ; LMULMAX8-NEXT: li a1, 52 @@ -459,7 +459,7 @@ define void @ctlz_v2i64(<2 x i64>* %x, <2 x i64>* %y) nounwind { ; LMULMAX2-RV32-LABEL: ctlz_v2i64: ; LMULMAX2-RV32: # %bb.0: -; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX2-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX2-RV32-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v9 @@ -474,23 +474,23 @@ ; LMULMAX2-RV32-NEXT: li a1, 32 ; LMULMAX2-RV32-NEXT: vsrl.vx v9, v8, a1 ; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v9 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.i v9, -1 -; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX2-RV32-NEXT: vxor.vv v8, v8, v9 ; LMULMAX2-RV32-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX2-RV32-NEXT: lui a1, 349525 ; LMULMAX2-RV32-NEXT: addi a1, a1, 1365 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.x v10, a1 -; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX2-RV32-NEXT: vand.vv v9, v9, v10 ; LMULMAX2-RV32-NEXT: vsub.vv v8, v8, v9 ; LMULMAX2-RV32-NEXT: lui a1, 209715 ; LMULMAX2-RV32-NEXT: addi a1, a1, 819 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.x v9, a1 -; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX2-RV32-NEXT: vand.vv v10, v8, v9 ; LMULMAX2-RV32-NEXT: vsrl.vi v8, v8, 2 ; LMULMAX2-RV32-NEXT: vand.vv v8, v8, v9 @@ -499,15 +499,15 @@ ; LMULMAX2-RV32-NEXT: vadd.vv v8, v8, v9 ; LMULMAX2-RV32-NEXT: lui a1, 61681 ; LMULMAX2-RV32-NEXT: addi a1, a1, -241 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.x v9, a1 -; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX2-RV32-NEXT: vand.vv v8, v8, v9 ; LMULMAX2-RV32-NEXT: lui a1, 4112 ; LMULMAX2-RV32-NEXT: addi a1, a1, 257 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma 
; LMULMAX2-RV32-NEXT: vmv.v.x v9, a1 -; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX2-RV32-NEXT: vmul.vv v8, v8, v9 ; LMULMAX2-RV32-NEXT: li a1, 56 ; LMULMAX2-RV32-NEXT: vsrl.vx v8, v8, a1 @@ -516,7 +516,7 @@ ; ; LMULMAX2-RV64-LABEL: ctlz_v2i64: ; LMULMAX2-RV64: # %bb.0: -; LMULMAX2-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX2-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX2-RV64-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v9 @@ -558,7 +558,7 @@ ; ; LMULMAX1-RV32-LABEL: ctlz_v2i64: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV32-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v9 @@ -573,23 +573,23 @@ ; LMULMAX1-RV32-NEXT: li a1, 32 ; LMULMAX1-RV32-NEXT: vsrl.vx v9, v8, a1 ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v9 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.i v9, -1 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vxor.vv v8, v8, v9 ; LMULMAX1-RV32-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX1-RV32-NEXT: lui a1, 349525 ; LMULMAX1-RV32-NEXT: addi a1, a1, 1365 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v10, a1 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vand.vv v9, v9, v10 ; LMULMAX1-RV32-NEXT: vsub.vv v8, v8, v9 ; LMULMAX1-RV32-NEXT: lui a1, 209715 ; LMULMAX1-RV32-NEXT: addi a1, a1, 819 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v9, a1 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vand.vv v10, v8, v9 ; LMULMAX1-RV32-NEXT: vsrl.vi v8, v8, 2 ; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v9 @@ -598,15 +598,15 @@ ; LMULMAX1-RV32-NEXT: vadd.vv v8, v8, v9 ; LMULMAX1-RV32-NEXT: lui a1, 61681 ; LMULMAX1-RV32-NEXT: addi a1, a1, -241 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v9, a1 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v9 ; LMULMAX1-RV32-NEXT: lui a1, 4112 ; LMULMAX1-RV32-NEXT: addi a1, a1, 257 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v9, a1 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmul.vv v8, v8, v9 ; LMULMAX1-RV32-NEXT: li a1, 56 ; LMULMAX1-RV32-NEXT: vsrl.vx v8, v8, a1 @@ -615,7 +615,7 @@ ; ; LMULMAX1-RV64-LABEL: ctlz_v2i64: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV64-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX1-RV64-NEXT: vor.vv v8, v8, v9 @@ -657,7 +657,7 @@ ; ; LMULMAX8-RV32-LABEL: ctlz_v2i64: ; 
LMULMAX8-RV32: # %bb.0: -; LMULMAX8-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX8-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX8-RV32-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX8-RV32-NEXT: vor.vv v8, v8, v9 @@ -672,23 +672,23 @@ ; LMULMAX8-RV32-NEXT: li a1, 32 ; LMULMAX8-RV32-NEXT: vsrl.vx v9, v8, a1 ; LMULMAX8-RV32-NEXT: vor.vv v8, v8, v9 -; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX8-RV32-NEXT: vmv.v.i v9, -1 -; LMULMAX8-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX8-RV32-NEXT: vxor.vv v8, v8, v9 ; LMULMAX8-RV32-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX8-RV32-NEXT: lui a1, 349525 ; LMULMAX8-RV32-NEXT: addi a1, a1, 1365 -; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX8-RV32-NEXT: vmv.v.x v10, a1 -; LMULMAX8-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX8-RV32-NEXT: vand.vv v9, v9, v10 ; LMULMAX8-RV32-NEXT: vsub.vv v8, v8, v9 ; LMULMAX8-RV32-NEXT: lui a1, 209715 ; LMULMAX8-RV32-NEXT: addi a1, a1, 819 -; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX8-RV32-NEXT: vmv.v.x v9, a1 -; LMULMAX8-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX8-RV32-NEXT: vand.vv v10, v8, v9 ; LMULMAX8-RV32-NEXT: vsrl.vi v8, v8, 2 ; LMULMAX8-RV32-NEXT: vand.vv v8, v8, v9 @@ -697,15 +697,15 @@ ; LMULMAX8-RV32-NEXT: vadd.vv v8, v8, v9 ; LMULMAX8-RV32-NEXT: lui a1, 61681 ; LMULMAX8-RV32-NEXT: addi a1, a1, -241 -; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX8-RV32-NEXT: vmv.v.x v9, a1 -; LMULMAX8-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX8-RV32-NEXT: vand.vv v8, v8, v9 ; LMULMAX8-RV32-NEXT: lui a1, 4112 ; LMULMAX8-RV32-NEXT: addi a1, a1, 257 -; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX8-RV32-NEXT: vmv.v.x v9, a1 -; LMULMAX8-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX8-RV32-NEXT: vmul.vv v8, v8, v9 ; LMULMAX8-RV32-NEXT: li a1, 56 ; LMULMAX8-RV32-NEXT: vsrl.vx v8, v8, a1 @@ -714,7 +714,7 @@ ; ; LMULMAX8-RV64-LABEL: ctlz_v2i64: ; LMULMAX8-RV64: # %bb.0: -; LMULMAX8-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX8-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX8-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX8-RV64-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX8-RV64-NEXT: vor.vv v8, v8, v9 @@ -765,7 +765,7 @@ ; LMULMAX2-LABEL: ctlz_v32i8: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: li a1, 32 -; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX2-NEXT: vor.vv v8, v8, v10 @@ -791,7 +791,7 @@ ; ; LMULMAX1-LABEL: ctlz_v32i8: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vle8.v v8, (a1) ; LMULMAX1-NEXT: vle8.v v9, (a0) @@ -838,13 +838,13 @@ ; LMULMAX8-LABEL: ctlz_v32i8: ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: li a1, 32 -; LMULMAX8-NEXT: vsetvli zero, 
a1, e32, m8, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; LMULMAX8-NEXT: vle8.v v8, (a0) ; LMULMAX8-NEXT: vzext.vf4 v16, v8 ; LMULMAX8-NEXT: vfcvt.f.xu.v v16, v16 -; LMULMAX8-NEXT: vsetvli zero, zero, e16, m4, ta, mu +; LMULMAX8-NEXT: vsetvli zero, zero, e16, m4, ta, ma ; LMULMAX8-NEXT: vnsrl.wi v12, v16, 23 -; LMULMAX8-NEXT: vsetvli zero, zero, e8, m2, ta, mu +; LMULMAX8-NEXT: vsetvli zero, zero, e8, m2, ta, ma ; LMULMAX8-NEXT: vnsrl.wi v10, v12, 0 ; LMULMAX8-NEXT: li a1, 134 ; LMULMAX8-NEXT: vmseq.vi v0, v8, 0 @@ -863,7 +863,7 @@ define void @ctlz_v16i16(<16 x i16>* %x, <16 x i16>* %y) nounwind { ; LMULMAX2-RV32-LABEL: ctlz_v16i16: ; LMULMAX2-RV32: # %bb.0: -; LMULMAX2-RV32-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-RV32-NEXT: vle16.v v8, (a0) ; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v10 @@ -898,7 +898,7 @@ ; ; LMULMAX2-RV64-LABEL: ctlz_v16i16: ; LMULMAX2-RV64: # %bb.0: -; LMULMAX2-RV64-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-RV64-NEXT: vle16.v v8, (a0) ; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v10 @@ -933,7 +933,7 @@ ; ; LMULMAX1-RV32-LABEL: ctlz_v16i16: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 ; LMULMAX1-RV32-NEXT: vle16.v v8, (a1) ; LMULMAX1-RV32-NEXT: vle16.v v9, (a0) @@ -992,7 +992,7 @@ ; ; LMULMAX1-RV64-LABEL: ctlz_v16i16: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV64-NEXT: addi a1, a0, 16 ; LMULMAX1-RV64-NEXT: vle16.v v8, (a1) ; LMULMAX1-RV64-NEXT: vle16.v v9, (a0) @@ -1051,7 +1051,7 @@ ; ; LMULMAX8-LABEL: ctlz_v16i16: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX8-NEXT: vle16.v v8, (a0) ; LMULMAX8-NEXT: vfwcvt.f.xu.v v12, v8 ; LMULMAX8-NEXT: vnsrl.wi v10, v12, 23 @@ -1073,7 +1073,7 @@ define void @ctlz_v8i32(<8 x i32>* %x, <8 x i32>* %y) nounwind { ; LMULMAX2-RV32-LABEL: ctlz_v8i32: ; LMULMAX2-RV32: # %bb.0: -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v10 @@ -1111,7 +1111,7 @@ ; ; LMULMAX2-RV64-LABEL: ctlz_v8i32: ; LMULMAX2-RV64: # %bb.0: -; LMULMAX2-RV64-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV64-NEXT: vle32.v v8, (a0) ; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v10 @@ -1149,7 +1149,7 @@ ; ; LMULMAX1-RV32-LABEL: ctlz_v8i32: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 ; LMULMAX1-RV32-NEXT: vle32.v v8, (a1) ; LMULMAX1-RV32-NEXT: vle32.v v9, (a0) @@ -1213,7 +1213,7 @@ ; ; LMULMAX1-RV64-LABEL: ctlz_v8i32: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV64-NEXT: addi a1, a0, 16 ; LMULMAX1-RV64-NEXT: vle32.v v8, (a1) ; LMULMAX1-RV64-NEXT: vle32.v v9, (a0) @@ -1277,7 +1277,7 @@ ; ; LMULMAX8-LABEL: ctlz_v8i32: ; 
LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX8-NEXT: vle32.v v8, (a0) ; LMULMAX8-NEXT: vfwcvt.f.xu.v v12, v8 ; LMULMAX8-NEXT: li a1, 52 @@ -1300,7 +1300,7 @@ define void @ctlz_v4i64(<4 x i64>* %x, <4 x i64>* %y) nounwind { ; LMULMAX2-RV32-LABEL: ctlz_v4i64: ; LMULMAX2-RV32: # %bb.0: -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v10 @@ -1315,23 +1315,23 @@ ; LMULMAX2-RV32-NEXT: li a1, 32 ; LMULMAX2-RV32-NEXT: vsrl.vx v10, v8, a1 ; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v10 -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.i v10, -1 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vxor.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX2-RV32-NEXT: lui a1, 349525 ; LMULMAX2-RV32-NEXT: addi a1, a1, 1365 -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.x v12, a1 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vand.vv v10, v10, v12 ; LMULMAX2-RV32-NEXT: vsub.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: lui a1, 209715 ; LMULMAX2-RV32-NEXT: addi a1, a1, 819 -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.x v10, a1 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vand.vv v12, v8, v10 ; LMULMAX2-RV32-NEXT: vsrl.vi v8, v8, 2 ; LMULMAX2-RV32-NEXT: vand.vv v8, v8, v10 @@ -1340,15 +1340,15 @@ ; LMULMAX2-RV32-NEXT: vadd.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: lui a1, 61681 ; LMULMAX2-RV32-NEXT: addi a1, a1, -241 -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.x v10, a1 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vand.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: lui a1, 4112 ; LMULMAX2-RV32-NEXT: addi a1, a1, 257 -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.x v10, a1 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmul.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: li a1, 56 ; LMULMAX2-RV32-NEXT: vsrl.vx v8, v8, a1 @@ -1357,7 +1357,7 @@ ; ; LMULMAX2-RV64-LABEL: ctlz_v4i64: ; LMULMAX2-RV64: # %bb.0: -; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX2-RV64-NEXT: vor.vv v8, v8, v10 @@ -1399,7 +1399,7 @@ ; ; LMULMAX1-RV32-LABEL: ctlz_v4i64: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 ; LMULMAX1-RV32-NEXT: vle64.v v8, (a1) ; LMULMAX1-RV32-NEXT: vle64.v v9, (a0) @@ -1416,23 +1416,23 @@ 
; LMULMAX1-RV32-NEXT: li a2, 32 ; LMULMAX1-RV32-NEXT: vsrl.vx v10, v8, a2 ; LMULMAX1-RV32-NEXT: vor.vv v8, v8, v10 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.i v10, -1 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vxor.vv v8, v8, v10 ; LMULMAX1-RV32-NEXT: vsrl.vi v11, v8, 1 ; LMULMAX1-RV32-NEXT: lui a3, 349525 ; LMULMAX1-RV32-NEXT: addi a3, a3, 1365 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v12, a3 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vand.vv v11, v11, v12 ; LMULMAX1-RV32-NEXT: vsub.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: lui a3, 209715 ; LMULMAX1-RV32-NEXT: addi a3, a3, 819 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v11, a3 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vand.vv v13, v8, v11 ; LMULMAX1-RV32-NEXT: vsrl.vi v8, v8, 2 ; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v11 @@ -1441,15 +1441,15 @@ ; LMULMAX1-RV32-NEXT: vadd.vv v8, v8, v13 ; LMULMAX1-RV32-NEXT: lui a3, 61681 ; LMULMAX1-RV32-NEXT: addi a3, a3, -241 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v13, a3 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v13 ; LMULMAX1-RV32-NEXT: lui a3, 4112 ; LMULMAX1-RV32-NEXT: addi a3, a3, 257 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v14, a3 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmul.vv v8, v8, v14 ; LMULMAX1-RV32-NEXT: li a3, 56 ; LMULMAX1-RV32-NEXT: vsrl.vx v8, v8, a3 @@ -1484,7 +1484,7 @@ ; ; LMULMAX1-RV64-LABEL: ctlz_v4i64: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: addi a1, a0, 16 ; LMULMAX1-RV64-NEXT: vle64.v v8, (a1) ; LMULMAX1-RV64-NEXT: vle64.v v9, (a0) @@ -1554,7 +1554,7 @@ ; ; LMULMAX8-RV32-LABEL: ctlz_v4i64: ; LMULMAX8-RV32: # %bb.0: -; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX8-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX8-RV32-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX8-RV32-NEXT: vor.vv v8, v8, v10 @@ -1569,23 +1569,23 @@ ; LMULMAX8-RV32-NEXT: li a1, 32 ; LMULMAX8-RV32-NEXT: vsrl.vx v10, v8, a1 ; LMULMAX8-RV32-NEXT: vor.vv v8, v8, v10 -; LMULMAX8-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX8-RV32-NEXT: vmv.v.i v10, -1 -; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX8-RV32-NEXT: vxor.vv v8, v8, v10 ; LMULMAX8-RV32-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX8-RV32-NEXT: lui a1, 349525 ; LMULMAX8-RV32-NEXT: addi a1, a1, 1365 -; LMULMAX8-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli 
zero, 8, e32, m2, ta, ma ; LMULMAX8-RV32-NEXT: vmv.v.x v12, a1 -; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX8-RV32-NEXT: vand.vv v10, v10, v12 ; LMULMAX8-RV32-NEXT: vsub.vv v8, v8, v10 ; LMULMAX8-RV32-NEXT: lui a1, 209715 ; LMULMAX8-RV32-NEXT: addi a1, a1, 819 -; LMULMAX8-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX8-RV32-NEXT: vmv.v.x v10, a1 -; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX8-RV32-NEXT: vand.vv v12, v8, v10 ; LMULMAX8-RV32-NEXT: vsrl.vi v8, v8, 2 ; LMULMAX8-RV32-NEXT: vand.vv v8, v8, v10 @@ -1594,15 +1594,15 @@ ; LMULMAX8-RV32-NEXT: vadd.vv v8, v8, v10 ; LMULMAX8-RV32-NEXT: lui a1, 61681 ; LMULMAX8-RV32-NEXT: addi a1, a1, -241 -; LMULMAX8-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX8-RV32-NEXT: vmv.v.x v10, a1 -; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX8-RV32-NEXT: vand.vv v8, v8, v10 ; LMULMAX8-RV32-NEXT: lui a1, 4112 ; LMULMAX8-RV32-NEXT: addi a1, a1, 257 -; LMULMAX8-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX8-RV32-NEXT: vmv.v.x v10, a1 -; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX8-RV32-NEXT: vmul.vv v8, v8, v10 ; LMULMAX8-RV32-NEXT: li a1, 56 ; LMULMAX8-RV32-NEXT: vsrl.vx v8, v8, a1 @@ -1611,7 +1611,7 @@ ; ; LMULMAX8-RV64-LABEL: ctlz_v4i64: ; LMULMAX8-RV64: # %bb.0: -; LMULMAX8-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX8-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX8-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX8-RV64-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX8-RV64-NEXT: vor.vv v8, v8, v10 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop.ll @@ -7,7 +7,7 @@ define void @ctpop_v16i8(<16 x i8>* %x, <16 x i8>* %y) { ; CHECK-LABEL: ctpop_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsrl.vi v9, v8, 1 ; CHECK-NEXT: li a1, 85 @@ -34,7 +34,7 @@ define void @ctpop_v8i16(<8 x i16>* %x, <8 x i16>* %y) { ; LMULMAX2-RV32-LABEL: ctpop_v8i16: ; LMULMAX2-RV32: # %bb.0: -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX2-RV32-NEXT: vle16.v v8, (a0) ; LMULMAX2-RV32-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX2-RV32-NEXT: lui a1, 5 @@ -60,7 +60,7 @@ ; ; LMULMAX2-RV64-LABEL: ctpop_v8i16: ; LMULMAX2-RV64: # %bb.0: -; LMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX2-RV64-NEXT: vle16.v v8, (a0) ; LMULMAX2-RV64-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX2-RV64-NEXT: lui a1, 5 @@ -86,7 +86,7 @@ ; ; LMULMAX1-RV32-LABEL: ctpop_v8i16: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV32-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX1-RV32-NEXT: lui a1, 5 @@ -112,7 +112,7 @@ ; ; LMULMAX1-RV64-LABEL: ctpop_v8i16: ; LMULMAX1-RV64: # %bb.0: -; 
LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV64-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX1-RV64-NEXT: lui a1, 5 @@ -146,7 +146,7 @@ define void @ctpop_v4i32(<4 x i32>* %x, <4 x i32>* %y) { ; LMULMAX2-RV32-LABEL: ctpop_v4i32: ; LMULMAX2-RV32: # %bb.0: -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX2-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX2-RV32-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX2-RV32-NEXT: lui a1, 349525 @@ -173,7 +173,7 @@ ; ; LMULMAX2-RV64-LABEL: ctpop_v4i32: ; LMULMAX2-RV64: # %bb.0: -; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX2-RV64-NEXT: vle32.v v8, (a0) ; LMULMAX2-RV64-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX2-RV64-NEXT: lui a1, 349525 @@ -200,7 +200,7 @@ ; ; LMULMAX1-RV32-LABEL: ctpop_v4i32: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV32-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX1-RV32-NEXT: lui a1, 349525 @@ -227,7 +227,7 @@ ; ; LMULMAX1-RV64-LABEL: ctpop_v4i32: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV64-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX1-RV64-NEXT: lui a1, 349525 @@ -262,21 +262,21 @@ define void @ctpop_v2i64(<2 x i64>* %x, <2 x i64>* %y) { ; LMULMAX2-RV32-LABEL: ctpop_v2i64: ; LMULMAX2-RV32: # %bb.0: -; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX2-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX2-RV32-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX2-RV32-NEXT: lui a1, 349525 ; LMULMAX2-RV32-NEXT: addi a1, a1, 1365 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.x v10, a1 -; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX2-RV32-NEXT: vand.vv v9, v9, v10 ; LMULMAX2-RV32-NEXT: vsub.vv v8, v8, v9 ; LMULMAX2-RV32-NEXT: lui a1, 209715 ; LMULMAX2-RV32-NEXT: addi a1, a1, 819 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.x v9, a1 -; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX2-RV32-NEXT: vand.vv v10, v8, v9 ; LMULMAX2-RV32-NEXT: vsrl.vi v8, v8, 2 ; LMULMAX2-RV32-NEXT: vand.vv v8, v8, v9 @@ -285,15 +285,15 @@ ; LMULMAX2-RV32-NEXT: vadd.vv v8, v8, v9 ; LMULMAX2-RV32-NEXT: lui a1, 61681 ; LMULMAX2-RV32-NEXT: addi a1, a1, -241 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.x v9, a1 -; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX2-RV32-NEXT: vand.vv v8, v8, v9 ; LMULMAX2-RV32-NEXT: lui a1, 4112 ; LMULMAX2-RV32-NEXT: addi a1, a1, 257 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.x v9, a1 -; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; 
LMULMAX2-RV32-NEXT: vmul.vv v8, v8, v9 ; LMULMAX2-RV32-NEXT: li a1, 56 ; LMULMAX2-RV32-NEXT: vsrl.vx v8, v8, a1 @@ -302,7 +302,7 @@ ; ; LMULMAX2-RV64-LABEL: ctpop_v2i64: ; LMULMAX2-RV64: # %bb.0: -; LMULMAX2-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX2-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI3_0) ; LMULMAX2-RV64-NEXT: ld a1, %lo(.LCPI3_0)(a1) @@ -330,21 +330,21 @@ ; ; LMULMAX1-RV32-LABEL: ctpop_v2i64: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV32-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX1-RV32-NEXT: lui a1, 349525 ; LMULMAX1-RV32-NEXT: addi a1, a1, 1365 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v10, a1 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vand.vv v9, v9, v10 ; LMULMAX1-RV32-NEXT: vsub.vv v8, v8, v9 ; LMULMAX1-RV32-NEXT: lui a1, 209715 ; LMULMAX1-RV32-NEXT: addi a1, a1, 819 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v9, a1 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vand.vv v10, v8, v9 ; LMULMAX1-RV32-NEXT: vsrl.vi v8, v8, 2 ; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v9 @@ -353,15 +353,15 @@ ; LMULMAX1-RV32-NEXT: vadd.vv v8, v8, v9 ; LMULMAX1-RV32-NEXT: lui a1, 61681 ; LMULMAX1-RV32-NEXT: addi a1, a1, -241 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v9, a1 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v9 ; LMULMAX1-RV32-NEXT: lui a1, 4112 ; LMULMAX1-RV32-NEXT: addi a1, a1, 257 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v9, a1 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmul.vv v8, v8, v9 ; LMULMAX1-RV32-NEXT: li a1, 56 ; LMULMAX1-RV32-NEXT: vsrl.vx v8, v8, a1 @@ -370,7 +370,7 @@ ; ; LMULMAX1-RV64-LABEL: ctpop_v2i64: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV64-NEXT: lui a1, %hi(.LCPI3_0) ; LMULMAX1-RV64-NEXT: ld a1, %lo(.LCPI3_0)(a1) @@ -407,7 +407,7 @@ ; LMULMAX2-LABEL: ctpop_v32i8: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: li a1, 32 -; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX2-NEXT: li a1, 85 @@ -426,7 +426,7 @@ ; ; LMULMAX1-LABEL: ctpop_v32i8: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vle8.v v8, (a1) ; LMULMAX1-NEXT: vle8.v v9, (a0) @@ -466,7 +466,7 @@ define void @ctpop_v16i16(<16 x i16>* %x, <16 x i16>* %y) { ; LMULMAX2-RV32-LABEL: ctpop_v16i16: ; LMULMAX2-RV32: # %bb.0: -; 
LMULMAX2-RV32-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-RV32-NEXT: vle16.v v8, (a0) ; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX2-RV32-NEXT: lui a1, 5 @@ -492,7 +492,7 @@ ; ; LMULMAX2-RV64-LABEL: ctpop_v16i16: ; LMULMAX2-RV64: # %bb.0: -; LMULMAX2-RV64-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-RV64-NEXT: vle16.v v8, (a0) ; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX2-RV64-NEXT: lui a1, 5 @@ -518,7 +518,7 @@ ; ; LMULMAX1-RV32-LABEL: ctpop_v16i16: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 ; LMULMAX1-RV32-NEXT: vle16.v v8, (a1) ; LMULMAX1-RV32-NEXT: vle16.v v9, (a0) @@ -559,7 +559,7 @@ ; ; LMULMAX1-RV64-LABEL: ctpop_v16i16: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV64-NEXT: addi a1, a0, 16 ; LMULMAX1-RV64-NEXT: vle16.v v8, (a1) ; LMULMAX1-RV64-NEXT: vle16.v v9, (a0) @@ -608,7 +608,7 @@ define void @ctpop_v8i32(<8 x i32>* %x, <8 x i32>* %y) { ; LMULMAX2-RV32-LABEL: ctpop_v8i32: ; LMULMAX2-RV32: # %bb.0: -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX2-RV32-NEXT: lui a1, 349525 @@ -635,7 +635,7 @@ ; ; LMULMAX2-RV64-LABEL: ctpop_v8i32: ; LMULMAX2-RV64: # %bb.0: -; LMULMAX2-RV64-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV64-NEXT: vle32.v v8, (a0) ; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX2-RV64-NEXT: lui a1, 349525 @@ -662,7 +662,7 @@ ; ; LMULMAX1-RV32-LABEL: ctpop_v8i32: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 ; LMULMAX1-RV32-NEXT: vle32.v v8, (a1) ; LMULMAX1-RV32-NEXT: vle32.v v9, (a0) @@ -704,7 +704,7 @@ ; ; LMULMAX1-RV64-LABEL: ctpop_v8i32: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV64-NEXT: addi a1, a0, 16 ; LMULMAX1-RV64-NEXT: vle32.v v8, (a1) ; LMULMAX1-RV64-NEXT: vle32.v v9, (a0) @@ -754,21 +754,21 @@ define void @ctpop_v4i64(<4 x i64>* %x, <4 x i64>* %y) { ; LMULMAX2-RV32-LABEL: ctpop_v4i64: ; LMULMAX2-RV32: # %bb.0: -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX2-RV32-NEXT: lui a1, 349525 ; LMULMAX2-RV32-NEXT: addi a1, a1, 1365 -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.x v12, a1 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vand.vv v10, v10, v12 ; LMULMAX2-RV32-NEXT: vsub.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: lui a1, 209715 ; LMULMAX2-RV32-NEXT: addi a1, a1, 819 -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.x v10, a1 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: 
vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vand.vv v12, v8, v10 ; LMULMAX2-RV32-NEXT: vsrl.vi v8, v8, 2 ; LMULMAX2-RV32-NEXT: vand.vv v8, v8, v10 @@ -777,15 +777,15 @@ ; LMULMAX2-RV32-NEXT: vadd.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: lui a1, 61681 ; LMULMAX2-RV32-NEXT: addi a1, a1, -241 -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.x v10, a1 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vand.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: lui a1, 4112 ; LMULMAX2-RV32-NEXT: addi a1, a1, 257 -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.x v10, a1 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmul.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: li a1, 56 ; LMULMAX2-RV32-NEXT: vsrl.vx v8, v8, a1 @@ -794,7 +794,7 @@ ; ; LMULMAX2-RV64-LABEL: ctpop_v4i64: ; LMULMAX2-RV64: # %bb.0: -; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI7_0) ; LMULMAX2-RV64-NEXT: ld a1, %lo(.LCPI7_0)(a1) @@ -822,23 +822,23 @@ ; ; LMULMAX1-RV32-LABEL: ctpop_v4i64: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 ; LMULMAX1-RV32-NEXT: vle64.v v8, (a1) ; LMULMAX1-RV32-NEXT: vle64.v v9, (a0) ; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX1-RV32-NEXT: lui a2, 349525 ; LMULMAX1-RV32-NEXT: addi a2, a2, 1365 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v11, a2 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vand.vv v10, v10, v11 ; LMULMAX1-RV32-NEXT: vsub.vv v8, v8, v10 ; LMULMAX1-RV32-NEXT: lui a2, 209715 ; LMULMAX1-RV32-NEXT: addi a2, a2, 819 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v10, a2 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vand.vv v12, v8, v10 ; LMULMAX1-RV32-NEXT: vsrl.vi v8, v8, 2 ; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v10 @@ -847,15 +847,15 @@ ; LMULMAX1-RV32-NEXT: vadd.vv v8, v8, v12 ; LMULMAX1-RV32-NEXT: lui a2, 61681 ; LMULMAX1-RV32-NEXT: addi a2, a2, -241 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v12, a2 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v12 ; LMULMAX1-RV32-NEXT: lui a2, 4112 ; LMULMAX1-RV32-NEXT: addi a2, a2, 257 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v13, a2 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmul.vv v8, v8, v13 ; LMULMAX1-RV32-NEXT: li a2, 56 ; LMULMAX1-RV32-NEXT: vsrl.vx v8, v8, a2 @@ 
-877,7 +877,7 @@ ; ; LMULMAX1-RV64-LABEL: ctpop_v4i64: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a1, a0, 16 ; LMULMAX1-RV64-NEXT: vle64.v v9, (a1) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll @@ -13,7 +13,7 @@ define void @cttz_v16i8(<16 x i8>* %x, <16 x i8>* %y) nounwind { ; CHECK-LABEL: cttz_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a1, 1 ; CHECK-NEXT: vsub.vx v9, v8, a1 @@ -36,16 +36,16 @@ ; ; LMULMAX8-LABEL: cttz_v16i8: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX8-NEXT: vle8.v v8, (a0) ; LMULMAX8-NEXT: vrsub.vi v9, v8, 0 ; LMULMAX8-NEXT: vand.vv v9, v8, v9 -; LMULMAX8-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; LMULMAX8-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; LMULMAX8-NEXT: vzext.vf4 v12, v9 ; LMULMAX8-NEXT: vfcvt.f.xu.v v12, v12 -; LMULMAX8-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; LMULMAX8-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; LMULMAX8-NEXT: vnsrl.wi v10, v12, 23 -; LMULMAX8-NEXT: vsetvli zero, zero, e8, m1, ta, mu +; LMULMAX8-NEXT: vsetvli zero, zero, e8, m1, ta, ma ; LMULMAX8-NEXT: vnsrl.wi v9, v10, 0 ; LMULMAX8-NEXT: li a1, 127 ; LMULMAX8-NEXT: vmseq.vi v0, v8, 0 @@ -64,7 +64,7 @@ define void @cttz_v8i16(<8 x i16>* %x, <8 x i16>* %y) nounwind { ; LMULMAX2-RV32I-LABEL: cttz_v8i16: ; LMULMAX2-RV32I: # %bb.0: -; LMULMAX2-RV32I-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX2-RV32I-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX2-RV32I-NEXT: vle16.v v8, (a0) ; LMULMAX2-RV32I-NEXT: li a1, 1 ; LMULMAX2-RV32I-NEXT: vsub.vx v9, v8, a1 @@ -94,7 +94,7 @@ ; ; LMULMAX2-RV64I-LABEL: cttz_v8i16: ; LMULMAX2-RV64I: # %bb.0: -; LMULMAX2-RV64I-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX2-RV64I-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX2-RV64I-NEXT: vle16.v v8, (a0) ; LMULMAX2-RV64I-NEXT: li a1, 1 ; LMULMAX2-RV64I-NEXT: vsub.vx v9, v8, a1 @@ -124,7 +124,7 @@ ; ; LMULMAX1-RV32-LABEL: cttz_v8i16: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV32-NEXT: li a1, 1 ; LMULMAX1-RV32-NEXT: vsub.vx v9, v8, a1 @@ -154,7 +154,7 @@ ; ; LMULMAX1-RV64-LABEL: cttz_v8i16: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV64-NEXT: li a1, 1 ; LMULMAX1-RV64-NEXT: vsub.vx v9, v8, a1 @@ -184,7 +184,7 @@ ; ; LMULMAX2-RV32D-LABEL: cttz_v8i16: ; LMULMAX2-RV32D: # %bb.0: -; LMULMAX2-RV32D-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX2-RV32D-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX2-RV32D-NEXT: vle16.v v8, (a0) ; LMULMAX2-RV32D-NEXT: vrsub.vi v9, v8, 0 ; LMULMAX2-RV32D-NEXT: vand.vv v9, v8, v9 @@ -200,7 +200,7 @@ ; ; LMULMAX2-RV64D-LABEL: cttz_v8i16: ; LMULMAX2-RV64D: # %bb.0: -; LMULMAX2-RV64D-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX2-RV64D-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX2-RV64D-NEXT: vle16.v v8, (a0) ; 
LMULMAX2-RV64D-NEXT: vrsub.vi v9, v8, 0 ; LMULMAX2-RV64D-NEXT: vand.vv v9, v8, v9 @@ -216,7 +216,7 @@ ; ; LMULMAX8-LABEL: cttz_v8i16: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX8-NEXT: vle16.v v8, (a0) ; LMULMAX8-NEXT: vrsub.vi v9, v8, 0 ; LMULMAX8-NEXT: vand.vv v9, v8, v9 @@ -240,7 +240,7 @@ define void @cttz_v4i32(<4 x i32>* %x, <4 x i32>* %y) nounwind { ; LMULMAX2-RV32I-LABEL: cttz_v4i32: ; LMULMAX2-RV32I: # %bb.0: -; LMULMAX2-RV32I-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX2-RV32I-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX2-RV32I-NEXT: vle32.v v8, (a0) ; LMULMAX2-RV32I-NEXT: li a1, 1 ; LMULMAX2-RV32I-NEXT: vsub.vx v9, v8, a1 @@ -271,7 +271,7 @@ ; ; LMULMAX2-RV64I-LABEL: cttz_v4i32: ; LMULMAX2-RV64I: # %bb.0: -; LMULMAX2-RV64I-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX2-RV64I-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX2-RV64I-NEXT: vle32.v v8, (a0) ; LMULMAX2-RV64I-NEXT: li a1, 1 ; LMULMAX2-RV64I-NEXT: vsub.vx v9, v8, a1 @@ -302,7 +302,7 @@ ; ; LMULMAX1-RV32-LABEL: cttz_v4i32: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV32-NEXT: li a1, 1 ; LMULMAX1-RV32-NEXT: vsub.vx v9, v8, a1 @@ -333,7 +333,7 @@ ; ; LMULMAX1-RV64-LABEL: cttz_v4i32: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV64-NEXT: li a1, 1 ; LMULMAX1-RV64-NEXT: vsub.vx v9, v8, a1 @@ -364,7 +364,7 @@ ; ; LMULMAX2-RV32D-LABEL: cttz_v4i32: ; LMULMAX2-RV32D: # %bb.0: -; LMULMAX2-RV32D-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX2-RV32D-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX2-RV32D-NEXT: vle32.v v8, (a0) ; LMULMAX2-RV32D-NEXT: vrsub.vi v9, v8, 0 ; LMULMAX2-RV32D-NEXT: vand.vv v9, v8, v9 @@ -381,7 +381,7 @@ ; ; LMULMAX2-RV64D-LABEL: cttz_v4i32: ; LMULMAX2-RV64D: # %bb.0: -; LMULMAX2-RV64D-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX2-RV64D-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX2-RV64D-NEXT: vle32.v v8, (a0) ; LMULMAX2-RV64D-NEXT: vrsub.vi v9, v8, 0 ; LMULMAX2-RV64D-NEXT: vand.vv v9, v8, v9 @@ -398,7 +398,7 @@ ; ; LMULMAX8-LABEL: cttz_v4i32: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX8-NEXT: vle32.v v8, (a0) ; LMULMAX8-NEXT: vrsub.vi v9, v8, 0 ; LMULMAX8-NEXT: vand.vv v9, v8, v9 @@ -423,28 +423,28 @@ define void @cttz_v2i64(<2 x i64>* %x, <2 x i64>* %y) nounwind { ; LMULMAX2-RV32-LABEL: cttz_v2i64: ; LMULMAX2-RV32: # %bb.0: -; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX2-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX2-RV32-NEXT: li a1, 1 ; LMULMAX2-RV32-NEXT: vsub.vx v9, v8, a1 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.i v10, -1 -; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX2-RV32-NEXT: vxor.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: vand.vv v8, v8, v9 ; LMULMAX2-RV32-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX2-RV32-NEXT: lui a1, 349525 ; LMULMAX2-RV32-NEXT: addi a1, a1, 1365 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, 
ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.x v10, a1 -; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX2-RV32-NEXT: vand.vv v9, v9, v10 ; LMULMAX2-RV32-NEXT: vsub.vv v8, v8, v9 ; LMULMAX2-RV32-NEXT: lui a1, 209715 ; LMULMAX2-RV32-NEXT: addi a1, a1, 819 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.x v9, a1 -; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX2-RV32-NEXT: vand.vv v10, v8, v9 ; LMULMAX2-RV32-NEXT: vsrl.vi v8, v8, 2 ; LMULMAX2-RV32-NEXT: vand.vv v8, v8, v9 @@ -453,15 +453,15 @@ ; LMULMAX2-RV32-NEXT: vadd.vv v8, v8, v9 ; LMULMAX2-RV32-NEXT: lui a1, 61681 ; LMULMAX2-RV32-NEXT: addi a1, a1, -241 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.x v9, a1 -; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX2-RV32-NEXT: vand.vv v8, v8, v9 ; LMULMAX2-RV32-NEXT: lui a1, 4112 ; LMULMAX2-RV32-NEXT: addi a1, a1, 257 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.x v9, a1 -; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX2-RV32-NEXT: vmul.vv v8, v8, v9 ; LMULMAX2-RV32-NEXT: li a1, 56 ; LMULMAX2-RV32-NEXT: vsrl.vx v8, v8, a1 @@ -470,7 +470,7 @@ ; ; LMULMAX2-RV64-LABEL: cttz_v2i64: ; LMULMAX2-RV64: # %bb.0: -; LMULMAX2-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX2-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX2-RV64-NEXT: li a1, 1 ; LMULMAX2-RV64-NEXT: vsub.vx v9, v8, a1 @@ -502,28 +502,28 @@ ; ; LMULMAX1-RV32-LABEL: cttz_v2i64: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV32-NEXT: li a1, 1 ; LMULMAX1-RV32-NEXT: vsub.vx v9, v8, a1 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.i v10, -1 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vxor.vv v8, v8, v10 ; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v9 ; LMULMAX1-RV32-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX1-RV32-NEXT: lui a1, 349525 ; LMULMAX1-RV32-NEXT: addi a1, a1, 1365 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v10, a1 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vand.vv v9, v9, v10 ; LMULMAX1-RV32-NEXT: vsub.vv v8, v8, v9 ; LMULMAX1-RV32-NEXT: lui a1, 209715 ; LMULMAX1-RV32-NEXT: addi a1, a1, 819 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v9, a1 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vand.vv v10, v8, v9 ; LMULMAX1-RV32-NEXT: vsrl.vi v8, v8, 2 ; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v9 @@ -532,15 +532,15 @@ ; LMULMAX1-RV32-NEXT: vadd.vv 
v8, v8, v9 ; LMULMAX1-RV32-NEXT: lui a1, 61681 ; LMULMAX1-RV32-NEXT: addi a1, a1, -241 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v9, a1 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v9 ; LMULMAX1-RV32-NEXT: lui a1, 4112 ; LMULMAX1-RV32-NEXT: addi a1, a1, 257 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v9, a1 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmul.vv v8, v8, v9 ; LMULMAX1-RV32-NEXT: li a1, 56 ; LMULMAX1-RV32-NEXT: vsrl.vx v8, v8, a1 @@ -549,7 +549,7 @@ ; ; LMULMAX1-RV64-LABEL: cttz_v2i64: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV64-NEXT: li a1, 1 ; LMULMAX1-RV64-NEXT: vsub.vx v9, v8, a1 @@ -581,28 +581,28 @@ ; ; LMULMAX8-RV32-LABEL: cttz_v2i64: ; LMULMAX8-RV32: # %bb.0: -; LMULMAX8-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX8-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX8-RV32-NEXT: li a1, 1 ; LMULMAX8-RV32-NEXT: vsub.vx v9, v8, a1 -; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX8-RV32-NEXT: vmv.v.i v10, -1 -; LMULMAX8-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX8-RV32-NEXT: vxor.vv v8, v8, v10 ; LMULMAX8-RV32-NEXT: vand.vv v8, v8, v9 ; LMULMAX8-RV32-NEXT: vsrl.vi v9, v8, 1 ; LMULMAX8-RV32-NEXT: lui a1, 349525 ; LMULMAX8-RV32-NEXT: addi a1, a1, 1365 -; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX8-RV32-NEXT: vmv.v.x v10, a1 -; LMULMAX8-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX8-RV32-NEXT: vand.vv v9, v9, v10 ; LMULMAX8-RV32-NEXT: vsub.vv v8, v8, v9 ; LMULMAX8-RV32-NEXT: lui a1, 209715 ; LMULMAX8-RV32-NEXT: addi a1, a1, 819 -; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX8-RV32-NEXT: vmv.v.x v9, a1 -; LMULMAX8-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX8-RV32-NEXT: vand.vv v10, v8, v9 ; LMULMAX8-RV32-NEXT: vsrl.vi v8, v8, 2 ; LMULMAX8-RV32-NEXT: vand.vv v8, v8, v9 @@ -611,15 +611,15 @@ ; LMULMAX8-RV32-NEXT: vadd.vv v8, v8, v9 ; LMULMAX8-RV32-NEXT: lui a1, 61681 ; LMULMAX8-RV32-NEXT: addi a1, a1, -241 -; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX8-RV32-NEXT: vmv.v.x v9, a1 -; LMULMAX8-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX8-RV32-NEXT: vand.vv v8, v8, v9 ; LMULMAX8-RV32-NEXT: lui a1, 4112 ; LMULMAX8-RV32-NEXT: addi a1, a1, 257 -; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX8-RV32-NEXT: vmv.v.x v9, a1 -; LMULMAX8-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX8-RV32-NEXT: 
vmul.vv v8, v8, v9 ; LMULMAX8-RV32-NEXT: li a1, 56 ; LMULMAX8-RV32-NEXT: vsrl.vx v8, v8, a1 @@ -628,7 +628,7 @@ ; ; LMULMAX8-RV64-LABEL: cttz_v2i64: ; LMULMAX8-RV64: # %bb.0: -; LMULMAX8-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX8-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX8-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX8-RV64-NEXT: li a1, 1 ; LMULMAX8-RV64-NEXT: vsub.vx v9, v8, a1 @@ -669,7 +669,7 @@ ; LMULMAX2-LABEL: cttz_v32i8: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: li a1, 32 -; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: li a1, 1 ; LMULMAX2-NEXT: vsub.vx v10, v8, a1 @@ -692,7 +692,7 @@ ; ; LMULMAX1-LABEL: cttz_v32i8: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vle8.v v8, (a1) ; LMULMAX1-NEXT: vle8.v v9, (a0) @@ -732,16 +732,16 @@ ; LMULMAX8-LABEL: cttz_v32i8: ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: li a1, 32 -; LMULMAX8-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; LMULMAX8-NEXT: vle8.v v8, (a0) ; LMULMAX8-NEXT: vrsub.vi v10, v8, 0 ; LMULMAX8-NEXT: vand.vv v10, v8, v10 -; LMULMAX8-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; LMULMAX8-NEXT: vsetvli zero, zero, e32, m8, ta, ma ; LMULMAX8-NEXT: vzext.vf4 v16, v10 ; LMULMAX8-NEXT: vfcvt.f.xu.v v16, v16 -; LMULMAX8-NEXT: vsetvli zero, zero, e16, m4, ta, mu +; LMULMAX8-NEXT: vsetvli zero, zero, e16, m4, ta, ma ; LMULMAX8-NEXT: vnsrl.wi v12, v16, 23 -; LMULMAX8-NEXT: vsetvli zero, zero, e8, m2, ta, mu +; LMULMAX8-NEXT: vsetvli zero, zero, e8, m2, ta, ma ; LMULMAX8-NEXT: vnsrl.wi v10, v12, 0 ; LMULMAX8-NEXT: li a1, 127 ; LMULMAX8-NEXT: vmseq.vi v0, v8, 0 @@ -760,7 +760,7 @@ define void @cttz_v16i16(<16 x i16>* %x, <16 x i16>* %y) nounwind { ; LMULMAX2-RV32-LABEL: cttz_v16i16: ; LMULMAX2-RV32: # %bb.0: -; LMULMAX2-RV32-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-RV32-NEXT: vle16.v v8, (a0) ; LMULMAX2-RV32-NEXT: li a1, 1 ; LMULMAX2-RV32-NEXT: vsub.vx v10, v8, a1 @@ -790,7 +790,7 @@ ; ; LMULMAX2-RV64-LABEL: cttz_v16i16: ; LMULMAX2-RV64: # %bb.0: -; LMULMAX2-RV64-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-RV64-NEXT: vle16.v v8, (a0) ; LMULMAX2-RV64-NEXT: li a1, 1 ; LMULMAX2-RV64-NEXT: vsub.vx v10, v8, a1 @@ -820,7 +820,7 @@ ; ; LMULMAX1-RV32-LABEL: cttz_v16i16: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 ; LMULMAX1-RV32-NEXT: vle16.v v8, (a1) ; LMULMAX1-RV32-NEXT: vle16.v v9, (a0) @@ -868,7 +868,7 @@ ; ; LMULMAX1-RV64-LABEL: cttz_v16i16: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV64-NEXT: addi a1, a0, 16 ; LMULMAX1-RV64-NEXT: vle16.v v8, (a1) ; LMULMAX1-RV64-NEXT: vle16.v v9, (a0) @@ -916,7 +916,7 @@ ; ; LMULMAX8-LABEL: cttz_v16i16: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX8-NEXT: vle16.v v8, (a0) ; LMULMAX8-NEXT: vrsub.vi v10, v8, 0 ; LMULMAX8-NEXT: vand.vv v10, v8, v10 @@ -940,7 +940,7 @@ define void @cttz_v8i32(<8 x i32>* %x, <8 x i32>* %y) nounwind { ; LMULMAX2-RV32-LABEL: cttz_v8i32: ; 
LMULMAX2-RV32: # %bb.0: -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX2-RV32-NEXT: li a1, 1 ; LMULMAX2-RV32-NEXT: vsub.vx v10, v8, a1 @@ -971,7 +971,7 @@ ; ; LMULMAX2-RV64-LABEL: cttz_v8i32: ; LMULMAX2-RV64: # %bb.0: -; LMULMAX2-RV64-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV64-NEXT: vle32.v v8, (a0) ; LMULMAX2-RV64-NEXT: li a1, 1 ; LMULMAX2-RV64-NEXT: vsub.vx v10, v8, a1 @@ -1002,7 +1002,7 @@ ; ; LMULMAX1-RV32-LABEL: cttz_v8i32: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 ; LMULMAX1-RV32-NEXT: vle32.v v8, (a1) ; LMULMAX1-RV32-NEXT: vle32.v v9, (a0) @@ -1051,7 +1051,7 @@ ; ; LMULMAX1-RV64-LABEL: cttz_v8i32: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV64-NEXT: addi a1, a0, 16 ; LMULMAX1-RV64-NEXT: vle32.v v8, (a1) ; LMULMAX1-RV64-NEXT: vle32.v v9, (a0) @@ -1100,7 +1100,7 @@ ; ; LMULMAX8-LABEL: cttz_v8i32: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX8-NEXT: vle32.v v8, (a0) ; LMULMAX8-NEXT: vrsub.vi v10, v8, 0 ; LMULMAX8-NEXT: vand.vv v10, v8, v10 @@ -1125,28 +1125,28 @@ define void @cttz_v4i64(<4 x i64>* %x, <4 x i64>* %y) nounwind { ; LMULMAX2-RV32-LABEL: cttz_v4i64: ; LMULMAX2-RV32: # %bb.0: -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX2-RV32-NEXT: li a1, 1 ; LMULMAX2-RV32-NEXT: vsub.vx v10, v8, a1 -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.i v12, -1 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vxor.vv v8, v8, v12 ; LMULMAX2-RV32-NEXT: vand.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX2-RV32-NEXT: lui a1, 349525 ; LMULMAX2-RV32-NEXT: addi a1, a1, 1365 -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.x v12, a1 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vand.vv v10, v10, v12 ; LMULMAX2-RV32-NEXT: vsub.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: lui a1, 209715 ; LMULMAX2-RV32-NEXT: addi a1, a1, 819 -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.x v10, a1 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vand.vv v12, v8, v10 ; LMULMAX2-RV32-NEXT: vsrl.vi v8, v8, 2 ; LMULMAX2-RV32-NEXT: vand.vv v8, v8, v10 @@ -1155,15 +1155,15 @@ ; LMULMAX2-RV32-NEXT: vadd.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: lui a1, 61681 ; LMULMAX2-RV32-NEXT: addi a1, a1, -241 -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.x v10, a1 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; 
LMULMAX2-RV32-NEXT: vand.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: lui a1, 4112 ; LMULMAX2-RV32-NEXT: addi a1, a1, 257 -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.x v10, a1 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmul.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: li a1, 56 ; LMULMAX2-RV32-NEXT: vsrl.vx v8, v8, a1 @@ -1172,7 +1172,7 @@ ; ; LMULMAX2-RV64-LABEL: cttz_v4i64: ; LMULMAX2-RV64: # %bb.0: -; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX2-RV64-NEXT: li a1, 1 ; LMULMAX2-RV64-NEXT: vsub.vx v10, v8, a1 @@ -1204,30 +1204,30 @@ ; ; LMULMAX1-RV32-LABEL: cttz_v4i64: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 ; LMULMAX1-RV32-NEXT: vle64.v v8, (a1) ; LMULMAX1-RV32-NEXT: vle64.v v9, (a0) ; LMULMAX1-RV32-NEXT: li a2, 1 ; LMULMAX1-RV32-NEXT: vsub.vx v10, v8, a2 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.i v11, -1 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vxor.vv v8, v8, v11 ; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v10 ; LMULMAX1-RV32-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX1-RV32-NEXT: lui a3, 349525 ; LMULMAX1-RV32-NEXT: addi a3, a3, 1365 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v12, a3 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vand.vv v10, v10, v12 ; LMULMAX1-RV32-NEXT: vsub.vv v8, v8, v10 ; LMULMAX1-RV32-NEXT: lui a3, 209715 ; LMULMAX1-RV32-NEXT: addi a3, a3, 819 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v10, a3 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vand.vv v13, v8, v10 ; LMULMAX1-RV32-NEXT: vsrl.vi v8, v8, 2 ; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v10 @@ -1236,15 +1236,15 @@ ; LMULMAX1-RV32-NEXT: vadd.vv v8, v8, v13 ; LMULMAX1-RV32-NEXT: lui a3, 61681 ; LMULMAX1-RV32-NEXT: addi a3, a3, -241 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v13, a3 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vand.vv v8, v8, v13 ; LMULMAX1-RV32-NEXT: lui a3, 4112 ; LMULMAX1-RV32-NEXT: addi a3, a3, 257 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v14, a3 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmul.vv v8, v8, v14 ; LMULMAX1-RV32-NEXT: li a3, 56 ; LMULMAX1-RV32-NEXT: vsrl.vx v8, v8, a3 @@ -1269,7 +1269,7 @@ ; ; LMULMAX1-RV64-LABEL: cttz_v4i64: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: 
vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: addi a1, a0, 16 ; LMULMAX1-RV64-NEXT: vle64.v v8, (a1) ; LMULMAX1-RV64-NEXT: vle64.v v9, (a0) @@ -1319,28 +1319,28 @@ ; ; LMULMAX8-RV32-LABEL: cttz_v4i64: ; LMULMAX8-RV32: # %bb.0: -; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX8-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX8-RV32-NEXT: li a1, 1 ; LMULMAX8-RV32-NEXT: vsub.vx v10, v8, a1 -; LMULMAX8-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX8-RV32-NEXT: vmv.v.i v12, -1 -; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX8-RV32-NEXT: vxor.vv v8, v8, v12 ; LMULMAX8-RV32-NEXT: vand.vv v8, v8, v10 ; LMULMAX8-RV32-NEXT: vsrl.vi v10, v8, 1 ; LMULMAX8-RV32-NEXT: lui a1, 349525 ; LMULMAX8-RV32-NEXT: addi a1, a1, 1365 -; LMULMAX8-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX8-RV32-NEXT: vmv.v.x v12, a1 -; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX8-RV32-NEXT: vand.vv v10, v10, v12 ; LMULMAX8-RV32-NEXT: vsub.vv v8, v8, v10 ; LMULMAX8-RV32-NEXT: lui a1, 209715 ; LMULMAX8-RV32-NEXT: addi a1, a1, 819 -; LMULMAX8-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX8-RV32-NEXT: vmv.v.x v10, a1 -; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX8-RV32-NEXT: vand.vv v12, v8, v10 ; LMULMAX8-RV32-NEXT: vsrl.vi v8, v8, 2 ; LMULMAX8-RV32-NEXT: vand.vv v8, v8, v10 @@ -1349,15 +1349,15 @@ ; LMULMAX8-RV32-NEXT: vadd.vv v8, v8, v10 ; LMULMAX8-RV32-NEXT: lui a1, 61681 ; LMULMAX8-RV32-NEXT: addi a1, a1, -241 -; LMULMAX8-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX8-RV32-NEXT: vmv.v.x v10, a1 -; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX8-RV32-NEXT: vand.vv v8, v8, v10 ; LMULMAX8-RV32-NEXT: lui a1, 4112 ; LMULMAX8-RV32-NEXT: addi a1, a1, 257 -; LMULMAX8-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX8-RV32-NEXT: vmv.v.x v10, a1 -; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX8-RV32-NEXT: vmul.vv v8, v8, v10 ; LMULMAX8-RV32-NEXT: li a1, 56 ; LMULMAX8-RV32-NEXT: vsrl.vx v8, v8, a1 @@ -1366,7 +1366,7 @@ ; ; LMULMAX8-RV64-LABEL: cttz_v4i64: ; LMULMAX8-RV64: # %bb.0: -; LMULMAX8-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX8-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX8-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX8-RV64-NEXT: li a1, 1 ; LMULMAX8-RV64-NEXT: vsub.vx v10, v8, a1 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-elen.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-elen.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-elen.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-elen.ll @@ -9,7 +9,7 @@ define void @add_v4i32(<4 x i32>* %x, <4 x i32>* %y) { ; CHECK-LABEL: add_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vadd.vv v8, v8, v9 @@ -70,7 +70,7 @@ define void @add_v2i32(<2 x i32>* 
%x, <2 x i32>* %y) { ; CHECK-LABEL: add_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vadd.vv v8, v8, v9 @@ -117,7 +117,7 @@ define void @fadd_v4f32(<4 x float>* %x, <4 x float>* %y) { ; CHECK-LABEL: fadd_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vfadd.vv v8, v8, v9 @@ -154,7 +154,7 @@ define void @fadd_v2f32(<2 x float>* %x, <2 x float>* %y) { ; CHECK-LABEL: fadd_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vfadd.vv v8, v8, v9 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll @@ -7,7 +7,7 @@ define <2 x i16> @sextload_v2i1_v2i16(<2 x i1>* %x) { ; CHECK-LABEL: sextload_v2i1_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 @@ -20,7 +20,7 @@ define <2 x i16> @sextload_v2i8_v2i16(<2 x i8>* %x) { ; CHECK-LABEL: sextload_v2i8_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vsext.vf2 v8, v9 ; CHECK-NEXT: ret @@ -32,7 +32,7 @@ define <2 x i16> @zextload_v2i8_v2i16(<2 x i8>* %x) { ; CHECK-LABEL: zextload_v2i8_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vzext.vf2 v8, v9 ; CHECK-NEXT: ret @@ -44,7 +44,7 @@ define <2 x i32> @sextload_v2i8_v2i32(<2 x i8>* %x) { ; CHECK-LABEL: sextload_v2i8_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vsext.vf4 v8, v9 ; CHECK-NEXT: ret @@ -56,7 +56,7 @@ define <2 x i32> @zextload_v2i8_v2i32(<2 x i8>* %x) { ; CHECK-LABEL: zextload_v2i8_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vzext.vf4 v8, v9 ; CHECK-NEXT: ret @@ -68,7 +68,7 @@ define <2 x i64> @sextload_v2i8_v2i64(<2 x i8>* %x) { ; CHECK-LABEL: sextload_v2i8_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vsext.vf8 v8, v9 ; CHECK-NEXT: ret @@ -80,7 +80,7 @@ define <2 x i64> @zextload_v2i8_v2i64(<2 x i8>* %x) { ; CHECK-LABEL: zextload_v2i8_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vzext.vf8 v8, v9 ; CHECK-NEXT: ret @@ -92,7 +92,7 @@ define <4 x i16> @sextload_v4i8_v4i16(<4 x i8>* %x) { ; CHECK-LABEL: sextload_v4i8_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; 
CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vsext.vf2 v8, v9 ; CHECK-NEXT: ret @@ -104,7 +104,7 @@ define <4 x i16> @zextload_v4i8_v4i16(<4 x i8>* %x) { ; CHECK-LABEL: zextload_v4i8_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vzext.vf2 v8, v9 ; CHECK-NEXT: ret @@ -116,7 +116,7 @@ define <4 x i32> @sextload_v4i8_v4i32(<4 x i8>* %x) { ; CHECK-LABEL: sextload_v4i8_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vsext.vf4 v8, v9 ; CHECK-NEXT: ret @@ -128,7 +128,7 @@ define <4 x i32> @zextload_v4i8_v4i32(<4 x i8>* %x) { ; CHECK-LABEL: zextload_v4i8_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vzext.vf4 v8, v9 ; CHECK-NEXT: ret @@ -140,18 +140,18 @@ define <4 x i64> @sextload_v4i8_v4i64(<4 x i8>* %x) { ; LMULMAX1-LABEL: sextload_v4i8_v4i64: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; LMULMAX1-NEXT: vle8.v v10, (a0) -; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf8 v9, v8 ; LMULMAX1-NEXT: vsext.vf8 v8, v10 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: sextload_v4i8_v4i64: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX4-NEXT: vle8.v v10, (a0) ; LMULMAX4-NEXT: vsext.vf8 v8, v10 ; LMULMAX4-NEXT: ret @@ -163,18 +163,18 @@ define <4 x i64> @zextload_v4i8_v4i64(<4 x i8>* %x) { ; LMULMAX1-LABEL: zextload_v4i8_v4i64: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; LMULMAX1-NEXT: vle8.v v10, (a0) -; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vzext.vf8 v9, v8 ; LMULMAX1-NEXT: vzext.vf8 v8, v10 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: zextload_v4i8_v4i64: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX4-NEXT: vle8.v v10, (a0) ; LMULMAX4-NEXT: vzext.vf8 v8, v10 ; LMULMAX4-NEXT: ret @@ -186,7 +186,7 @@ define <8 x i16> @sextload_v8i8_v8i16(<8 x i8>* %x) { ; CHECK-LABEL: sextload_v8i8_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vsext.vf2 v8, v9 ; CHECK-NEXT: ret @@ -198,7 +198,7 @@ define <8 x i16> @zextload_v8i8_v8i16(<8 x i8>* %x) { ; CHECK-LABEL: zextload_v8i8_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vzext.vf2 v8, v9 ; CHECK-NEXT: ret @@ -210,18 +210,18 @@ define <8 x i32> @sextload_v8i8_v8i32(<8 x i8>* %x) { ; LMULMAX1-LABEL: sextload_v8i8_v8i32: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: 
vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX1-NEXT: vle8.v v10, (a0) -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v10, 4 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf4 v9, v8 ; LMULMAX1-NEXT: vsext.vf4 v8, v10 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: sextload_v8i8_v8i32: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX4-NEXT: vle8.v v10, (a0) ; LMULMAX4-NEXT: vsext.vf4 v8, v10 ; LMULMAX4-NEXT: ret @@ -233,18 +233,18 @@ define <8 x i32> @zextload_v8i8_v8i32(<8 x i8>* %x) { ; LMULMAX1-LABEL: zextload_v8i8_v8i32: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX1-NEXT: vle8.v v10, (a0) -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v10, 4 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vzext.vf4 v9, v8 ; LMULMAX1-NEXT: vzext.vf4 v8, v10 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: zextload_v8i8_v8i32: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX4-NEXT: vle8.v v10, (a0) ; LMULMAX4-NEXT: vzext.vf4 v8, v10 ; LMULMAX4-NEXT: ret @@ -256,26 +256,26 @@ define <8 x i64> @sextload_v8i8_v8i64(<8 x i8>* %x) { ; LMULMAX1-LABEL: sextload_v8i8_v8i64: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX1-NEXT: vle8.v v12, (a0) -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v12, 4 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf8 v10, v8 -; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v11, v12, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf8 v9, v11 -; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf8 v11, v8 ; LMULMAX1-NEXT: vsext.vf8 v8, v12 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: sextload_v8i8_v8i64: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; LMULMAX4-NEXT: vle8.v v12, (a0) ; LMULMAX4-NEXT: vsext.vf8 v8, v12 ; LMULMAX4-NEXT: ret @@ -287,26 +287,26 @@ define <8 x i64> @zextload_v8i8_v8i64(<8 x i8>* %x) { ; LMULMAX1-LABEL: zextload_v8i8_v8i64: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX1-NEXT: vle8.v v12, (a0) -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v12, 4 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vzext.vf8 v10, v8 -; LMULMAX1-NEXT: 
vsetivli zero, 2, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v11, v12, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vzext.vf8 v9, v11 -; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vzext.vf8 v11, v8 ; LMULMAX1-NEXT: vzext.vf8 v8, v12 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: zextload_v8i8_v8i64: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; LMULMAX4-NEXT: vle8.v v12, (a0) ; LMULMAX4-NEXT: vzext.vf8 v8, v12 ; LMULMAX4-NEXT: ret @@ -318,18 +318,18 @@ define <16 x i16> @sextload_v16i8_v16i16(<16 x i8>* %x) { ; LMULMAX1-LABEL: sextload_v16i8_v16i16: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: vle8.v v10, (a0) -; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v10, 8 -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf2 v9, v8 ; LMULMAX1-NEXT: vsext.vf2 v8, v10 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: sextload_v16i8_v16i16: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX4-NEXT: vle8.v v10, (a0) ; LMULMAX4-NEXT: vsext.vf2 v8, v10 ; LMULMAX4-NEXT: ret @@ -341,18 +341,18 @@ define <16 x i16> @zextload_v16i8_v16i16(<16 x i8>* %x) { ; LMULMAX1-LABEL: zextload_v16i8_v16i16: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: vle8.v v10, (a0) -; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v10, 8 -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-NEXT: vzext.vf2 v9, v8 ; LMULMAX1-NEXT: vzext.vf2 v8, v10 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: zextload_v16i8_v16i16: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX4-NEXT: vle8.v v10, (a0) ; LMULMAX4-NEXT: vzext.vf2 v8, v10 ; LMULMAX4-NEXT: ret @@ -364,26 +364,26 @@ define <16 x i32> @sextload_v16i8_v16i32(<16 x i8>* %x) { ; LMULMAX1-LABEL: sextload_v16i8_v16i32: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: vle8.v v12, (a0) -; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v12, 8 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf4 v10, v8 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v11, v12, 4 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf4 v9, v11 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 
4, e8, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v8, 4 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf4 v11, v8 ; LMULMAX1-NEXT: vsext.vf4 v8, v12 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: sextload_v16i8_v16i32: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; LMULMAX4-NEXT: vle8.v v12, (a0) ; LMULMAX4-NEXT: vsext.vf4 v8, v12 ; LMULMAX4-NEXT: ret @@ -395,26 +395,26 @@ define <16 x i32> @zextload_v16i8_v16i32(<16 x i8>* %x) { ; LMULMAX1-LABEL: zextload_v16i8_v16i32: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: vle8.v v12, (a0) -; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v12, 8 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vzext.vf4 v10, v8 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v11, v12, 4 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vzext.vf4 v9, v11 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v8, 4 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vzext.vf4 v11, v8 ; LMULMAX1-NEXT: vzext.vf4 v8, v12 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: zextload_v16i8_v16i32: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; LMULMAX4-NEXT: vle8.v v12, (a0) ; LMULMAX4-NEXT: vzext.vf4 v8, v12 ; LMULMAX4-NEXT: ret @@ -426,46 +426,46 @@ define <16 x i64> @sextload_v16i8_v16i64(<16 x i8>* %x) { ; LMULMAX1-LABEL: sextload_v16i8_v16i64: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: vle8.v v16, (a0) -; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v16, 8 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf8 v12, v8 -; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v10, v16, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf8 v9, v10 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v11, v16, 4 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf8 v10, v11 -; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v14, v8, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf8 v13, v14 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v8, 4 -; LMULMAX1-NEXT: 
vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf8 v14, v8 -; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v15, v11, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf8 v11, v15 -; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf8 v15, v8 ; LMULMAX1-NEXT: vsext.vf8 v8, v16 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: sextload_v16i8_v16i64: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX4-NEXT: vle8.v v16, (a0) -; LMULMAX4-NEXT: vsetivli zero, 8, e8, m1, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 8, e8, m1, ta, ma ; LMULMAX4-NEXT: vslidedown.vi v8, v16, 8 -; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; LMULMAX4-NEXT: vsext.vf8 v12, v8 ; LMULMAX4-NEXT: vsext.vf8 v8, v16 ; LMULMAX4-NEXT: ret @@ -477,46 +477,46 @@ define <16 x i64> @zextload_v16i8_v16i64(<16 x i8>* %x) { ; LMULMAX1-LABEL: zextload_v16i8_v16i64: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: vle8.v v16, (a0) -; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v16, 8 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vzext.vf8 v12, v8 -; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v10, v16, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vzext.vf8 v9, v10 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v11, v16, 4 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vzext.vf8 v10, v11 -; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v14, v8, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vzext.vf8 v13, v14 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v8, 4 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vzext.vf8 v14, v8 -; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v15, v11, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vzext.vf8 v11, v15 -; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: 
vzext.vf8 v15, v8 ; LMULMAX1-NEXT: vzext.vf8 v8, v16 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: zextload_v16i8_v16i64: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX4-NEXT: vle8.v v16, (a0) -; LMULMAX4-NEXT: vsetivli zero, 8, e8, m1, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 8, e8, m1, ta, ma ; LMULMAX4-NEXT: vslidedown.vi v8, v16, 8 -; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; LMULMAX4-NEXT: vzext.vf8 v12, v8 ; LMULMAX4-NEXT: vzext.vf8 v8, v16 ; LMULMAX4-NEXT: ret @@ -528,16 +528,16 @@ define void @truncstore_v2i8_v2i1(<2 x i8> %x, <2 x i1>* %z) { ; CHECK-LABEL: truncstore_v2i8_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a0) ; CHECK-NEXT: ret @@ -549,7 +549,7 @@ define void @truncstore_v2i16_v2i8(<2 x i16> %x, <2 x i8>* %z) { ; CHECK-LABEL: truncstore_v2i16_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret @@ -561,7 +561,7 @@ define <2 x i32> @sextload_v2i16_v2i32(<2 x i16>* %x) { ; CHECK-LABEL: sextload_v2i16_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vsext.vf2 v8, v9 ; CHECK-NEXT: ret @@ -573,7 +573,7 @@ define <2 x i32> @zextload_v2i16_v2i32(<2 x i16>* %x) { ; CHECK-LABEL: zextload_v2i16_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vzext.vf2 v8, v9 ; CHECK-NEXT: ret @@ -585,7 +585,7 @@ define <2 x i64> @sextload_v2i16_v2i64(<2 x i16>* %x) { ; CHECK-LABEL: sextload_v2i16_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vsext.vf4 v8, v9 ; CHECK-NEXT: ret @@ -597,7 +597,7 @@ define <2 x i64> @zextload_v2i16_v2i64(<2 x i16>* %x) { ; CHECK-LABEL: zextload_v2i16_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vzext.vf4 v8, v9 ; CHECK-NEXT: ret @@ -609,7 +609,7 @@ define void @truncstore_v4i16_v4i8(<4 x i16> %x, <4 x i8>* %z) { ; CHECK-LABEL: truncstore_v4i16_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret @@ -621,7 +621,7 @@ define <4 x i32> @sextload_v4i16_v4i32(<4 x i16>* %x) { ; CHECK-LABEL: sextload_v4i16_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle16.v 
v9, (a0) ; CHECK-NEXT: vsext.vf2 v8, v9 ; CHECK-NEXT: ret @@ -633,7 +633,7 @@ define <4 x i32> @zextload_v4i16_v4i32(<4 x i16>* %x) { ; CHECK-LABEL: zextload_v4i16_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vzext.vf2 v8, v9 ; CHECK-NEXT: ret @@ -645,18 +645,18 @@ define <4 x i64> @sextload_v4i16_v4i64(<4 x i16>* %x) { ; LMULMAX1-LABEL: sextload_v4i16_v4i64: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; LMULMAX1-NEXT: vle16.v v10, (a0) -; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf4 v9, v8 ; LMULMAX1-NEXT: vsext.vf4 v8, v10 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: sextload_v4i16_v4i64: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX4-NEXT: vle16.v v10, (a0) ; LMULMAX4-NEXT: vsext.vf4 v8, v10 ; LMULMAX4-NEXT: ret @@ -668,18 +668,18 @@ define <4 x i64> @zextload_v4i16_v4i64(<4 x i16>* %x) { ; LMULMAX1-LABEL: zextload_v4i16_v4i64: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; LMULMAX1-NEXT: vle16.v v10, (a0) -; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vzext.vf4 v9, v8 ; LMULMAX1-NEXT: vzext.vf4 v8, v10 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: zextload_v4i16_v4i64: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX4-NEXT: vle16.v v10, (a0) ; LMULMAX4-NEXT: vzext.vf4 v8, v10 ; LMULMAX4-NEXT: ret @@ -691,7 +691,7 @@ define void @truncstore_v8i16_v8i8(<8 x i16> %x, <8 x i8>* %z) { ; CHECK-LABEL: truncstore_v8i16_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret @@ -703,18 +703,18 @@ define <8 x i32> @sextload_v8i16_v8i32(<8 x i16>* %x) { ; LMULMAX1-LABEL: sextload_v8i16_v8i32: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-NEXT: vle16.v v10, (a0) -; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v10, 4 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf2 v9, v8 ; LMULMAX1-NEXT: vsext.vf2 v8, v10 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: sextload_v8i16_v8i32: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX4-NEXT: vle16.v v10, (a0) ; LMULMAX4-NEXT: vsext.vf2 v8, v10 ; LMULMAX4-NEXT: ret @@ -726,18 +726,18 @@ define <8 x i32> @zextload_v8i16_v8i32(<8 x i16>* %x) { ; LMULMAX1-LABEL: zextload_v8i16_v8i32: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, 
m1, ta, ma ; LMULMAX1-NEXT: vle16.v v10, (a0) -; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v10, 4 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vzext.vf2 v9, v8 ; LMULMAX1-NEXT: vzext.vf2 v8, v10 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: zextload_v8i16_v8i32: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX4-NEXT: vle16.v v10, (a0) ; LMULMAX4-NEXT: vzext.vf2 v8, v10 ; LMULMAX4-NEXT: ret @@ -749,26 +749,26 @@ define <8 x i64> @sextload_v8i16_v8i64(<8 x i16>* %x) { ; LMULMAX1-LABEL: sextload_v8i16_v8i64: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-NEXT: vle16.v v12, (a0) -; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v12, 4 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf4 v10, v8 -; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v11, v12, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf4 v9, v11 -; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf4 v11, v8 ; LMULMAX1-NEXT: vsext.vf4 v8, v12 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: sextload_v8i16_v8i64: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; LMULMAX4-NEXT: vle16.v v12, (a0) ; LMULMAX4-NEXT: vsext.vf4 v8, v12 ; LMULMAX4-NEXT: ret @@ -780,26 +780,26 @@ define <8 x i64> @zextload_v8i16_v8i64(<8 x i16>* %x) { ; LMULMAX1-LABEL: zextload_v8i16_v8i64: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-NEXT: vle16.v v12, (a0) -; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v12, 4 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vzext.vf4 v10, v8 -; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v11, v12, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vzext.vf4 v9, v11 -; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vzext.vf4 v11, v8 ; LMULMAX1-NEXT: vzext.vf4 v8, v12 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: zextload_v8i16_v8i64: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; LMULMAX4-NEXT: vle16.v v12, (a0) ; LMULMAX4-NEXT: vzext.vf4 v8, v12 ; LMULMAX4-NEXT: ret @@ 
-811,17 +811,17 @@ define void @truncstore_v16i16_v16i8(<16 x i16> %x, <16 x i8>* %z) { ; LMULMAX1-LABEL: truncstore_v16i16_v16i8: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v9, 8 ; LMULMAX1-NEXT: vse8.v v8, (a0) ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: truncstore_v16i16_v16i8: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX4-NEXT: vnsrl.wi v10, v8, 0 ; LMULMAX4-NEXT: vse8.v v10, (a0) ; LMULMAX4-NEXT: ret @@ -833,17 +833,17 @@ define <16 x i32> @sextload_v16i16_v16i32(<16 x i16>* %x) { ; LMULMAX1-LABEL: sextload_v16i16_v16i32: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-NEXT: vle16.v v10, (a0) ; LMULMAX1-NEXT: addi a0, a0, 16 ; LMULMAX1-NEXT: vle16.v v12, (a0) -; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v10, 4 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf2 v9, v8 -; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v12, 4 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf2 v11, v8 ; LMULMAX1-NEXT: vsext.vf2 v8, v10 ; LMULMAX1-NEXT: vsext.vf2 v10, v12 @@ -851,7 +851,7 @@ ; ; LMULMAX4-LABEL: sextload_v16i16_v16i32: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; LMULMAX4-NEXT: vle16.v v12, (a0) ; LMULMAX4-NEXT: vsext.vf2 v8, v12 ; LMULMAX4-NEXT: ret @@ -863,17 +863,17 @@ define <16 x i32> @zextload_v16i16_v16i32(<16 x i16>* %x) { ; LMULMAX1-LABEL: zextload_v16i16_v16i32: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-NEXT: vle16.v v10, (a0) ; LMULMAX1-NEXT: addi a0, a0, 16 ; LMULMAX1-NEXT: vle16.v v12, (a0) -; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v10, 4 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vzext.vf2 v9, v8 -; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v12, 4 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vzext.vf2 v11, v8 ; LMULMAX1-NEXT: vzext.vf2 v8, v10 ; LMULMAX1-NEXT: vzext.vf2 v10, v12 @@ -881,7 +881,7 @@ ; ; LMULMAX4-LABEL: zextload_v16i16_v16i32: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; LMULMAX4-NEXT: vle16.v v12, (a0) ; LMULMAX4-NEXT: vzext.vf2 v8, v12 ; LMULMAX4-NEXT: ret @@ -893,33 +893,33 @@ define <16 x i64> @sextload_v16i16_v16i64(<16 x i16>* %x) { ; LMULMAX1-LABEL: sextload_v16i16_v16i64: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-NEXT: 
vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-NEXT: vle16.v v12, (a0) ; LMULMAX1-NEXT: addi a0, a0, 16 ; LMULMAX1-NEXT: vle16.v v16, (a0) -; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v12, 4 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf4 v10, v8 -; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v15, v16, 4 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf4 v14, v15 -; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v11, v12, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf4 v9, v11 -; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf4 v11, v8 -; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v16, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf4 v13, v8 -; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v15, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf4 v15, v8 ; LMULMAX1-NEXT: vsext.vf4 v8, v12 ; LMULMAX1-NEXT: vsext.vf4 v12, v16 @@ -927,11 +927,11 @@ ; ; LMULMAX4-LABEL: sextload_v16i16_v16i64: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX4-NEXT: vle16.v v16, (a0) -; LMULMAX4-NEXT: vsetivli zero, 8, e16, m2, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 8, e16, m2, ta, ma ; LMULMAX4-NEXT: vslidedown.vi v8, v16, 8 -; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; LMULMAX4-NEXT: vsext.vf4 v12, v8 ; LMULMAX4-NEXT: vsext.vf4 v8, v16 ; LMULMAX4-NEXT: ret @@ -943,33 +943,33 @@ define <16 x i64> @zextload_v16i16_v16i64(<16 x i16>* %x) { ; LMULMAX1-LABEL: zextload_v16i16_v16i64: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-NEXT: vle16.v v12, (a0) ; LMULMAX1-NEXT: addi a0, a0, 16 ; LMULMAX1-NEXT: vle16.v v16, (a0) -; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v12, 4 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vzext.vf4 v10, v8 -; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v15, v16, 4 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vzext.vf4 v14, v15 -; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; LMULMAX1-NEXT: 
vslidedown.vi v11, v12, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vzext.vf4 v9, v11 -; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vzext.vf4 v11, v8 -; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v16, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vzext.vf4 v13, v8 -; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v15, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vzext.vf4 v15, v8 ; LMULMAX1-NEXT: vzext.vf4 v8, v12 ; LMULMAX1-NEXT: vzext.vf4 v12, v16 @@ -977,11 +977,11 @@ ; ; LMULMAX4-LABEL: zextload_v16i16_v16i64: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX4-NEXT: vle16.v v16, (a0) -; LMULMAX4-NEXT: vsetivli zero, 8, e16, m2, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 8, e16, m2, ta, ma ; LMULMAX4-NEXT: vslidedown.vi v8, v16, 8 -; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; LMULMAX4-NEXT: vzext.vf4 v12, v8 ; LMULMAX4-NEXT: vzext.vf4 v8, v16 ; LMULMAX4-NEXT: ret @@ -993,9 +993,9 @@ define void @truncstore_v2i32_v2i8(<2 x i32> %x, <2 x i8>* %z) { ; CHECK-LABEL: truncstore_v2i32_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret @@ -1007,7 +1007,7 @@ define void @truncstore_v2i32_v2i16(<2 x i32> %x, <2 x i16>* %z) { ; CHECK-LABEL: truncstore_v2i32_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret @@ -1019,7 +1019,7 @@ define <2 x i64> @sextload_v2i32_v2i64(<2 x i32>* %x) { ; CHECK-LABEL: sextload_v2i32_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vsext.vf2 v8, v9 ; CHECK-NEXT: ret @@ -1031,7 +1031,7 @@ define <2 x i64> @zextload_v2i32_v2i64(<2 x i32>* %x) { ; CHECK-LABEL: zextload_v2i32_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vzext.vf2 v8, v9 ; CHECK-NEXT: ret @@ -1043,9 +1043,9 @@ define void @truncstore_v4i32_v4i8(<4 x i32> %x, <4 x i8>* %z) { ; CHECK-LABEL: truncstore_v4i32_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret @@ -1057,7 +1057,7 @@ 
define void @truncstore_v4i32_v4i16(<4 x i32> %x, <4 x i16>* %z) { ; CHECK-LABEL: truncstore_v4i32_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret @@ -1069,18 +1069,18 @@ define <4 x i64> @sextload_v4i32_v4i64(<4 x i32>* %x) { ; LMULMAX1-LABEL: sextload_v4i32_v4i64: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vle32.v v10, (a0) -; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf2 v9, v8 ; LMULMAX1-NEXT: vsext.vf2 v8, v10 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: sextload_v4i32_v4i64: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX4-NEXT: vle32.v v10, (a0) ; LMULMAX4-NEXT: vsext.vf2 v8, v10 ; LMULMAX4-NEXT: ret @@ -1092,18 +1092,18 @@ define <4 x i64> @zextload_v4i32_v4i64(<4 x i32>* %x) { ; LMULMAX1-LABEL: zextload_v4i32_v4i64: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vle32.v v10, (a0) -; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vzext.vf2 v9, v8 ; LMULMAX1-NEXT: vzext.vf2 v8, v10 ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: zextload_v4i32_v4i64: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX4-NEXT: vle32.v v10, (a0) ; LMULMAX4-NEXT: vzext.vf2 v8, v10 ; LMULMAX4-NEXT: ret @@ -1115,24 +1115,24 @@ define void @truncstore_v8i32_v8i8(<8 x i32> %x, <8 x i8>* %z) { ; LMULMAX1-LABEL: truncstore_v8i32_v8i8: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 ; LMULMAX1-NEXT: vse8.v v8, (a0) ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: truncstore_v8i32_v8i8: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX4-NEXT: vnsrl.wi v10, v8, 0 -; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; LMULMAX4-NEXT: vnsrl.wi v8, v10, 0 ; LMULMAX4-NEXT: vse8.v v8, (a0) ; LMULMAX4-NEXT: ret @@ -1144,17 +1144,17 @@ define void @truncstore_v8i32_v8i16(<8 x i32> %x, <8 x i16>* %z) { ; LMULMAX1-LABEL: truncstore_v8i32_v8i16: ; LMULMAX1: # %bb.0: -; 
LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 ; LMULMAX1-NEXT: vse16.v v8, (a0) ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: truncstore_v8i32_v8i16: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX4-NEXT: vnsrl.wi v10, v8, 0 ; LMULMAX4-NEXT: vse16.v v10, (a0) ; LMULMAX4-NEXT: ret @@ -1166,17 +1166,17 @@ define <8 x i64> @sextload_v8i32_v8i64(<8 x i32>* %x) { ; LMULMAX1-LABEL: sextload_v8i32_v8i64: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vle32.v v10, (a0) ; LMULMAX1-NEXT: addi a0, a0, 16 ; LMULMAX1-NEXT: vle32.v v12, (a0) -; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf2 v9, v8 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v12, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf2 v11, v8 ; LMULMAX1-NEXT: vsext.vf2 v8, v10 ; LMULMAX1-NEXT: vsext.vf2 v10, v12 @@ -1184,7 +1184,7 @@ ; ; LMULMAX4-LABEL: sextload_v8i32_v8i64: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; LMULMAX4-NEXT: vle32.v v12, (a0) ; LMULMAX4-NEXT: vsext.vf2 v8, v12 ; LMULMAX4-NEXT: ret @@ -1196,17 +1196,17 @@ define <8 x i64> @zextload_v8i32_v8i64(<8 x i32>* %x) { ; LMULMAX1-LABEL: zextload_v8i32_v8i64: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vle32.v v10, (a0) ; LMULMAX1-NEXT: addi a0, a0, 16 ; LMULMAX1-NEXT: vle32.v v12, (a0) -; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vzext.vf2 v9, v8 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v12, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vzext.vf2 v11, v8 ; LMULMAX1-NEXT: vzext.vf2 v8, v10 ; LMULMAX1-NEXT: vzext.vf2 v10, v12 @@ -1214,7 +1214,7 @@ ; ; LMULMAX4-LABEL: zextload_v8i32_v8i64: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; LMULMAX4-NEXT: vle32.v v12, (a0) ; LMULMAX4-NEXT: vzext.vf2 v8, v12 ; LMULMAX4-NEXT: ret @@ -1226,36 +1226,36 @@ define void @truncstore_v16i32_v16i8(<16 x i32> %x, <16 x i8>* %z) { ; LMULMAX1-LABEL: truncstore_v16i32_v16i8: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli 
zero, zero, e8, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 -; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v10, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetivli zero, 12, e8, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 12, e8, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v9, 8 -; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v11, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v9, 12 ; LMULMAX1-NEXT: vse8.v v8, (a0) ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: truncstore_v16i32_v16i8: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX4-NEXT: vnsrl.wi v12, v8, 0 -; LMULMAX4-NEXT: vsetvli zero, zero, e8, m1, ta, mu +; LMULMAX4-NEXT: vsetvli zero, zero, e8, m1, ta, ma ; LMULMAX4-NEXT: vnsrl.wi v8, v12, 0 ; LMULMAX4-NEXT: vse8.v v8, (a0) ; LMULMAX4-NEXT: ret @@ -1267,15 +1267,15 @@ define void @truncstore_v16i32_v16i16(<16 x i32> %x, <16 x i16>* %z) { ; LMULMAX1-LABEL: truncstore_v16i32_v16i16: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 -; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v11, 0 ; LMULMAX1-NEXT: vnsrl.wi v10, v10, 0 -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v10, v9, 4 ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vse16.v v10, (a1) @@ -1284,7 +1284,7 @@ ; ; LMULMAX4-LABEL: truncstore_v16i32_v16i16: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX4-NEXT: vnsrl.wi v12, v8, 0 ; LMULMAX4-NEXT: vse16.v v12, (a0) ; LMULMAX4-NEXT: ret @@ -1297,28 +1297,28 @@ ; LMULMAX1-LABEL: sextload_v16i32_v16i64: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: addi a1, a0, 48 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vle32.v v16, (a1) ; LMULMAX1-NEXT: addi a1, a0, 32 ; LMULMAX1-NEXT: vle32.v v14, (a1) ; LMULMAX1-NEXT: vle32.v v10, (a0) ; LMULMAX1-NEXT: addi a0, a0, 16 ; LMULMAX1-NEXT: vle32.v v12, (a0) -; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2 
-; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf2 v9, v8 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v12, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf2 v11, v8 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v14, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf2 v13, v8 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v16, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf2 v15, v8 ; LMULMAX1-NEXT: vsext.vf2 v8, v10 ; LMULMAX1-NEXT: vsext.vf2 v10, v12 @@ -1328,11 +1328,11 @@ ; ; LMULMAX4-LABEL: sextload_v16i32_v16i64: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; LMULMAX4-NEXT: vle32.v v16, (a0) -; LMULMAX4-NEXT: vsetivli zero, 8, e32, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 8, e32, m4, ta, ma ; LMULMAX4-NEXT: vslidedown.vi v8, v16, 8 -; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; LMULMAX4-NEXT: vsext.vf2 v12, v8 ; LMULMAX4-NEXT: vsext.vf2 v8, v16 ; LMULMAX4-NEXT: ret @@ -1345,28 +1345,28 @@ ; LMULMAX1-LABEL: zextload_v16i32_v16i64: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: addi a1, a0, 48 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vle32.v v16, (a1) ; LMULMAX1-NEXT: addi a1, a0, 32 ; LMULMAX1-NEXT: vle32.v v14, (a1) ; LMULMAX1-NEXT: vle32.v v10, (a0) ; LMULMAX1-NEXT: addi a0, a0, 16 ; LMULMAX1-NEXT: vle32.v v12, (a0) -; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vzext.vf2 v9, v8 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v12, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vzext.vf2 v11, v8 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v14, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vzext.vf2 v13, v8 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v16, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vzext.vf2 v15, v8 ; LMULMAX1-NEXT: vzext.vf2 v8, v10 ; LMULMAX1-NEXT: vzext.vf2 v10, v12 @@ -1376,11 +1376,11 @@ ; ; LMULMAX4-LABEL: zextload_v16i32_v16i64: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; LMULMAX4-NEXT: vle32.v v16, (a0) -; LMULMAX4-NEXT: vsetivli zero, 8, e32, m4, ta, 
mu +; LMULMAX4-NEXT: vsetivli zero, 8, e32, m4, ta, ma ; LMULMAX4-NEXT: vslidedown.vi v8, v16, 8 -; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; LMULMAX4-NEXT: vzext.vf2 v12, v8 ; LMULMAX4-NEXT: vzext.vf2 v8, v16 ; LMULMAX4-NEXT: ret @@ -1392,11 +1392,11 @@ define void @truncstore_v2i64_v2i8(<2 x i64> %x, <2 x i8>* %z) { ; CHECK-LABEL: truncstore_v2i64_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret @@ -1408,9 +1408,9 @@ define void @truncstore_v2i64_v2i16(<2 x i64> %x, <2 x i16>* %z) { ; CHECK-LABEL: truncstore_v2i64_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret @@ -1422,7 +1422,7 @@ define void @truncstore_v2i64_v2i32(<2 x i64> %x, <2 x i32>* %z) { ; CHECK-LABEL: truncstore_v2i64_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret @@ -1434,30 +1434,30 @@ define void @truncstore_v4i64_v4i8(<4 x i64> %x, <4 x i8>* %z) { ; LMULMAX1-LABEL: truncstore_v4i64_v4i8: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vse8.v v8, (a0) ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: truncstore_v4i64_v4i8: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX4-NEXT: vnsrl.wi v10, v8, 0 -; LMULMAX4-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; LMULMAX4-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; LMULMAX4-NEXT: vnsrl.wi v8, v10, 0 -; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; LMULMAX4-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX4-NEXT: vse8.v v8, (a0) ; LMULMAX4-NEXT: ret @@ -1469,24 +1469,24 @@ define void @truncstore_v4i64_v4i16(<4 x i64> %x, <4 x i16>* %z) { ; 
LMULMAX1-LABEL: truncstore_v4i64_v4i16: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 -; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vse16.v v8, (a0) ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: truncstore_v4i64_v4i16: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX4-NEXT: vnsrl.wi v10, v8, 0 -; LMULMAX4-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; LMULMAX4-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; LMULMAX4-NEXT: vnsrl.wi v8, v10, 0 ; LMULMAX4-NEXT: vse16.v v8, (a0) ; LMULMAX4-NEXT: ret @@ -1498,17 +1498,17 @@ define void @truncstore_v4i64_v4i32(<4 x i64> %x, <4 x i32>* %z) { ; LMULMAX1-LABEL: truncstore_v4i64_v4i32: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 ; LMULMAX1-NEXT: vse32.v v8, (a0) ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: truncstore_v4i64_v4i32: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX4-NEXT: vnsrl.wi v10, v8, 0 ; LMULMAX4-NEXT: vse32.v v10, (a0) ; LMULMAX4-NEXT: ret @@ -1520,46 +1520,46 @@ define void @truncstore_v8i64_v8i8(<8 x i64> %x, <8 x i8>* %z) { ; LMULMAX1-LABEL: truncstore_v8i64_v8i8: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v10, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; 
LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v11, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v9, 6 ; LMULMAX1-NEXT: vse8.v v8, (a0) ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: truncstore_v8i64_v8i8: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX4-NEXT: vnsrl.wi v12, v8, 0 -; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; LMULMAX4-NEXT: vnsrl.wi v8, v12, 0 -; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; LMULMAX4-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX4-NEXT: vse8.v v8, (a0) ; LMULMAX4-NEXT: ret @@ -1571,36 +1571,36 @@ define void @truncstore_v8i64_v8i16(<8 x i64> %x, <8 x i16>* %z) { ; LMULMAX1-LABEL: truncstore_v8i64_v8i16: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 -; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v10, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v11, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v9, 6 ; LMULMAX1-NEXT: vse16.v v8, (a0) ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: truncstore_v8i64_v8i16: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX4-NEXT: vnsrl.wi v12, v8, 0 -; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; LMULMAX4-NEXT: vnsrl.wi v8, v12, 0 
; LMULMAX4-NEXT: vse16.v v8, (a0) ; LMULMAX4-NEXT: ret @@ -1612,15 +1612,15 @@ define void @truncstore_v8i64_v8i32(<8 x i64> %x, <8 x i32>* %z) { ; LMULMAX1-LABEL: truncstore_v8i64_v8i32: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v11, 0 ; LMULMAX1-NEXT: vnsrl.wi v10, v10, 0 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v10, v9, 2 ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vse32.v v10, (a1) @@ -1629,7 +1629,7 @@ ; ; LMULMAX4-LABEL: truncstore_v8i64_v8i32: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX4-NEXT: vnsrl.wi v12, v8, 0 ; LMULMAX4-NEXT: vse32.v v12, (a0) ; LMULMAX4-NEXT: ret @@ -1641,86 +1641,86 @@ define void @truncstore_v16i64_v16i8(<16 x i64> %x, <16 x i8>* %z) { ; LMULMAX1-LABEL: truncstore_v16i64_v16i8: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v10, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetivli zero, 6, e8, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 6, e8, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v11, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v9, 6 -; LMULMAX1-NEXT: vsetivli zero, 
2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v12, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetivli zero, 10, e8, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 10, e8, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v9, 8 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v13, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetivli zero, 12, e8, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 12, e8, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v9, 10 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v14, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetivli zero, 14, e8, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 14, e8, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v9, 12 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v15, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v9, 14 ; LMULMAX1-NEXT: vse8.v v8, (a0) ; LMULMAX1-NEXT: ret ; ; LMULMAX4-LABEL: truncstore_v16i64_v16i8: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX4-NEXT: vnsrl.wi v16, v12, 0 -; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; LMULMAX4-NEXT: vnsrl.wi v12, v16, 0 -; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; LMULMAX4-NEXT: vnsrl.wi v12, v12, 0 -; LMULMAX4-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; LMULMAX4-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; LMULMAX4-NEXT: vnsrl.wi v14, v8, 0 -; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; LMULMAX4-NEXT: vnsrl.wi v8, v14, 0 -; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; LMULMAX4-NEXT: vnsrl.wi v8, v8, 0 -; LMULMAX4-NEXT: vsetivli zero, 16, e8, m1, tu, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e8, m1, tu, ma ; LMULMAX4-NEXT: vslideup.vi v8, v12, 8 ; LMULMAX4-NEXT: vse8.v v8, (a0) ; LMULMAX4-NEXT: ret @@ -1732,49 +1732,49 @@ define void @truncstore_v16i64_v16i16(<16 x i64> %x, <16 x i16>* %z) { 
; LMULMAX1-LABEL: truncstore_v16i64_v16i16: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 -; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v10, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v11, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v9, 6 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v13, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v10, v12, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v10, v10, 0 -; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v10, v9, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v14, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v10, v9, 4 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v15, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v10, v9, 6 ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vse16.v v10, (a1) @@ -1783,15 +1783,15 @@ ; ; LMULMAX4-LABEL: truncstore_v16i64_v16i16: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX4-NEXT: vnsrl.wi v16, v12, 0 -; 
LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; LMULMAX4-NEXT: vnsrl.wi v12, v16, 0 -; LMULMAX4-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; LMULMAX4-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; LMULMAX4-NEXT: vnsrl.wi v14, v8, 0 -; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; LMULMAX4-NEXT: vnsrl.wi v8, v14, 0 -; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, tu, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, tu, ma ; LMULMAX4-NEXT: vslideup.vi v8, v12, 8 ; LMULMAX4-NEXT: vse16.v v8, (a0) ; LMULMAX4-NEXT: ret @@ -1803,25 +1803,25 @@ define void @truncstore_v16i64_v16i32(<16 x i64> %x, <16 x i32>* %z) { ; LMULMAX1-LABEL: truncstore_v16i64_v16i32: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 ; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v9, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v11, 0 ; LMULMAX1-NEXT: vnsrl.wi v10, v10, 0 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v10, v9, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v13, 0 ; LMULMAX1-NEXT: vnsrl.wi v11, v12, 0 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v11, v9, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v15, 0 ; LMULMAX1-NEXT: vnsrl.wi v12, v14, 0 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v12, v9, 2 ; LMULMAX1-NEXT: addi a1, a0, 48 ; LMULMAX1-NEXT: vse32.v v12, (a1) @@ -1834,10 +1834,10 @@ ; ; LMULMAX4-LABEL: truncstore_v16i64_v16i32: ; LMULMAX4: # %bb.0: -; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX4-NEXT: vnsrl.wi v16, v12, 0 ; LMULMAX4-NEXT: vnsrl.wi v12, v8, 0 -; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, tu, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, tu, ma ; LMULMAX4-NEXT: vslideup.vi v12, v16, 8 ; LMULMAX4-NEXT: vse32.v v12, (a0) ; LMULMAX4-NEXT: ret @@ -1849,7 +1849,7 @@ define @extload_nxv2f16_nxv2f32(* %x) { ; CHECK-LABEL: extload_nxv2f16_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vfwcvt.f.f.v v8, v9 ; CHECK-NEXT: ret @@ -1861,10 +1861,10 @@ define @extload_nxv2f16_nxv2f64(* %x) { ; CHECK-LABEL: extload_nxv2f16_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfwcvt.f.f.v v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v8, v10 ; CHECK-NEXT: ret %y = load , * %x @@ -1876,7 +1876,7 @@ ; CHECK-LABEL: extload_nxv4f16_nxv4f32: ; CHECK: # %bb.0: ; CHECK-NEXT: vl1re16.v v10, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, 
mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v8, v10 ; CHECK-NEXT: ret %y = load , * %x @@ -1888,9 +1888,9 @@ ; CHECK-LABEL: extload_nxv4f16_nxv4f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vl1re16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v8, v12 ; CHECK-NEXT: ret %y = load , * %x @@ -1902,7 +1902,7 @@ ; CHECK-LABEL: extload_nxv8f16_nxv8f32: ; CHECK: # %bb.0: ; CHECK-NEXT: vl2re16.v v12, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v8, v12 ; CHECK-NEXT: ret %y = load , * %x @@ -1914,9 +1914,9 @@ ; CHECK-LABEL: extload_nxv8f16_nxv8f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vl2re16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v16, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v8, v16 ; CHECK-NEXT: ret %y = load , * %x @@ -1928,7 +1928,7 @@ ; CHECK-LABEL: extload_nxv16f16_nxv16f32: ; CHECK: # %bb.0: ; CHECK-NEXT: vl4re16.v v16, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v8, v16 ; CHECK-NEXT: ret %y = load , * %x @@ -1940,13 +1940,13 @@ ; CHECK-LABEL: extload_nxv16f16_nxv16f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vl4re16.v v16, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v20, v16 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v8, v20 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v24, v18 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v16, v24 ; CHECK-NEXT: ret %y = load , * %x @@ -1957,7 +1957,7 @@ define void @truncstore_nxv2f32_nxv2f16( %x, * %z) { ; CHECK-LABEL: truncstore_nxv2f32_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v9, v8 ; CHECK-NEXT: vse16.v v9, (a0) ; CHECK-NEXT: ret @@ -1970,7 +1970,7 @@ ; CHECK-LABEL: extload_nxv2f32_nxv2f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vl1re32.v v10, (a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v8, v10 ; CHECK-NEXT: ret %y = load , * %x @@ -1981,7 +1981,7 @@ define void @truncstore_nxv4f32_nxv4f16( %x, * %z) { ; CHECK-LABEL: truncstore_nxv4f32_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v10, v8 ; CHECK-NEXT: vs1r.v v10, (a0) ; CHECK-NEXT: ret @@ -1994,7 +1994,7 @@ ; CHECK-LABEL: extload_nxv4f32_nxv4f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vl2re32.v v12, (a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v8, v12 ; CHECK-NEXT: ret %y = load , * %x @@ -2005,7 +2005,7 @@ define void @truncstore_nxv8f32_nxv8f16( %x, * %z) { ; CHECK-LABEL: truncstore_nxv8f32_nxv8f16: ; CHECK: # 
%bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v12, v8 ; CHECK-NEXT: vs2r.v v12, (a0) ; CHECK-NEXT: ret @@ -2018,7 +2018,7 @@ ; CHECK-LABEL: extload_nxv8f32_nxv8f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vl4re32.v v16, (a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v8, v16 ; CHECK-NEXT: ret %y = load , * %x @@ -2029,7 +2029,7 @@ define void @truncstore_nxv16f32_nxv16f16( %x, * %z) { ; CHECK-LABEL: truncstore_nxv16f32_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v16, v8 ; CHECK-NEXT: vs4r.v v16, (a0) ; CHECK-NEXT: ret @@ -2042,7 +2042,7 @@ ; CHECK-LABEL: extload_nxv16f32_nxv16f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v8, v24 ; CHECK-NEXT: vfwcvt.f.f.v v16, v28 ; CHECK-NEXT: ret @@ -2054,9 +2054,9 @@ define void @truncstore_nxv2f64_nxv2f16( %x, * %z) { ; CHECK-LABEL: truncstore_nxv2f64_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.rod.f.f.w v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v8, v10 ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret @@ -2068,7 +2068,7 @@ define void @truncstore_nxv2f64_nxv2f32( %x, * %z) { ; CHECK-LABEL: truncstore_nxv2f64_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v10, v8 ; CHECK-NEXT: vs1r.v v10, (a0) ; CHECK-NEXT: ret @@ -2080,9 +2080,9 @@ define void @truncstore_nxv4f64_nxv4f16( %x, * %z) { ; CHECK-LABEL: truncstore_nxv4f64_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vfncvt.rod.f.f.w v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v8, v12 ; CHECK-NEXT: vs1r.v v8, (a0) ; CHECK-NEXT: ret @@ -2094,7 +2094,7 @@ define void @truncstore_nxv4f64_nxv4f32( %x, * %z) { ; CHECK-LABEL: truncstore_nxv4f64_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v12, v8 ; CHECK-NEXT: vs2r.v v12, (a0) ; CHECK-NEXT: ret @@ -2106,9 +2106,9 @@ define void @truncstore_nxv8f64_nxv8f16( %x, * %z) { ; CHECK-LABEL: truncstore_nxv8f64_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vfncvt.rod.f.f.w v16, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v8, v16 ; CHECK-NEXT: vs2r.v v8, (a0) ; CHECK-NEXT: ret @@ -2120,7 +2120,7 @@ define void @truncstore_nxv8f64_nxv8f32( %x, * %z) { ; CHECK-LABEL: truncstore_nxv8f64_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v16, v8 ; CHECK-NEXT: vs4r.v v16, (a0) ; CHECK-NEXT: ret @@ -2132,13 +2132,13 @@ define void @truncstore_nxv16f64_nxv16f16( %x, * %z) { ; CHECK-LABEL: truncstore_nxv16f64_nxv16f16: ; 
CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vfncvt.rod.f.f.w v24, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v8, v24 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-NEXT: vfncvt.rod.f.f.w v12, v16 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v10, v12 ; CHECK-NEXT: vs4r.v v8, (a0) ; CHECK-NEXT: ret @@ -2150,7 +2150,7 @@ define void @truncstore_nxv16f64_nxv16f32( %x, * %z) { ; CHECK-LABEL: truncstore_nxv16f64_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v24, v8 ; CHECK-NEXT: vfncvt.f.f.w v28, v16 ; CHECK-NEXT: vs8r.v v24, (a0) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll @@ -8,7 +8,7 @@ define i1 @extractelt_v1i1(<1 x i8>* %x, i64 %idx) nounwind { ; CHECK-LABEL: extractelt_v1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmseq.vi v0, v8, 0 ; CHECK-NEXT: vmv.v.i v8, 0 @@ -25,12 +25,12 @@ define i1 @extractelt_v2i1(<2 x i8>* %x, i64 %idx) nounwind { ; CHECK-LABEL: extractelt_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmseq.vi v0, v8, 0 ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -43,12 +43,12 @@ define i1 @extractelt_v4i1(<4 x i8>* %x, i64 %idx) nounwind { ; CHECK-LABEL: extractelt_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmseq.vi v0, v8, 0 ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -61,7 +61,7 @@ define i1 @extractelt_v8i1(<8 x i8>* %x, i64 %idx) nounwind { ; RV32-LABEL: extractelt_v8i1: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV32-NEXT: vle8.v v8, (a0) ; RV32-NEXT: vmseq.vi v8, v8, 0 ; RV32-NEXT: vmv.x.s a0, v8 @@ -71,7 +71,7 @@ ; ; RV64-LABEL: extractelt_v8i1: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64-NEXT: vle8.v v8, (a0) ; RV64-NEXT: vmseq.vi v8, v8, 0 ; RV64-NEXT: vmv.x.s a0, v8 @@ -81,7 +81,7 @@ ; ; RV32ZBS-LABEL: extractelt_v8i1: ; RV32ZBS: # %bb.0: -; RV32ZBS-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV32ZBS-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV32ZBS-NEXT: vle8.v v8, (a0) ; RV32ZBS-NEXT: vmseq.vi v8, v8, 0 ; RV32ZBS-NEXT: vmv.x.s a0, v8 @@ -90,7 +90,7 @@ ; ; RV64ZBS-LABEL: extractelt_v8i1: ; RV64ZBS: # %bb.0: -; RV64ZBS-NEXT: vsetivli 
zero, 8, e8, mf2, ta, mu +; RV64ZBS-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64ZBS-NEXT: vle8.v v8, (a0) ; RV64ZBS-NEXT: vmseq.vi v8, v8, 0 ; RV64ZBS-NEXT: vmv.x.s a0, v8 @@ -105,10 +105,10 @@ define i1 @extractelt_v16i1(<16 x i8>* %x, i64 %idx) nounwind { ; RV32-LABEL: extractelt_v16i1: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV32-NEXT: vle8.v v8, (a0) ; RV32-NEXT: vmseq.vi v8, v8, 0 -; RV32-NEXT: vsetivli zero, 0, e16, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 0, e16, mf4, ta, ma ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: srl a0, a0, a1 ; RV32-NEXT: andi a0, a0, 1 @@ -116,10 +116,10 @@ ; ; RV64-LABEL: extractelt_v16i1: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV64-NEXT: vle8.v v8, (a0) ; RV64-NEXT: vmseq.vi v8, v8, 0 -; RV64-NEXT: vsetivli zero, 0, e16, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 0, e16, mf4, ta, ma ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: srl a0, a0, a1 ; RV64-NEXT: andi a0, a0, 1 @@ -127,20 +127,20 @@ ; ; RV32ZBS-LABEL: extractelt_v16i1: ; RV32ZBS: # %bb.0: -; RV32ZBS-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; RV32ZBS-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV32ZBS-NEXT: vle8.v v8, (a0) ; RV32ZBS-NEXT: vmseq.vi v8, v8, 0 -; RV32ZBS-NEXT: vsetivli zero, 0, e16, mf4, ta, mu +; RV32ZBS-NEXT: vsetivli zero, 0, e16, mf4, ta, ma ; RV32ZBS-NEXT: vmv.x.s a0, v8 ; RV32ZBS-NEXT: bext a0, a0, a1 ; RV32ZBS-NEXT: ret ; ; RV64ZBS-LABEL: extractelt_v16i1: ; RV64ZBS: # %bb.0: -; RV64ZBS-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; RV64ZBS-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV64ZBS-NEXT: vle8.v v8, (a0) ; RV64ZBS-NEXT: vmseq.vi v8, v8, 0 -; RV64ZBS-NEXT: vsetivli zero, 0, e16, mf4, ta, mu +; RV64ZBS-NEXT: vsetivli zero, 0, e16, mf4, ta, ma ; RV64ZBS-NEXT: vmv.x.s a0, v8 ; RV64ZBS-NEXT: bext a0, a0, a1 ; RV64ZBS-NEXT: ret @@ -154,10 +154,10 @@ ; RV32-LABEL: extractelt_v32i1: ; RV32: # %bb.0: ; RV32-NEXT: li a2, 32 -; RV32-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; RV32-NEXT: vle8.v v8, (a0) ; RV32-NEXT: vmseq.vi v10, v8, 0 -; RV32-NEXT: vsetivli zero, 0, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 0, e32, mf2, ta, ma ; RV32-NEXT: vmv.x.s a0, v10 ; RV32-NEXT: srl a0, a0, a1 ; RV32-NEXT: andi a0, a0, 1 @@ -166,10 +166,10 @@ ; RV64-LABEL: extractelt_v32i1: ; RV64: # %bb.0: ; RV64-NEXT: li a2, 32 -; RV64-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; RV64-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; RV64-NEXT: vle8.v v8, (a0) ; RV64-NEXT: vmseq.vi v10, v8, 0 -; RV64-NEXT: vsetivli zero, 0, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 0, e32, mf2, ta, ma ; RV64-NEXT: vmv.x.s a0, v10 ; RV64-NEXT: srl a0, a0, a1 ; RV64-NEXT: andi a0, a0, 1 @@ -178,10 +178,10 @@ ; RV32ZBS-LABEL: extractelt_v32i1: ; RV32ZBS: # %bb.0: ; RV32ZBS-NEXT: li a2, 32 -; RV32ZBS-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; RV32ZBS-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; RV32ZBS-NEXT: vle8.v v8, (a0) ; RV32ZBS-NEXT: vmseq.vi v10, v8, 0 -; RV32ZBS-NEXT: vsetivli zero, 0, e32, mf2, ta, mu +; RV32ZBS-NEXT: vsetivli zero, 0, e32, mf2, ta, ma ; RV32ZBS-NEXT: vmv.x.s a0, v10 ; RV32ZBS-NEXT: bext a0, a0, a1 ; RV32ZBS-NEXT: ret @@ -189,10 +189,10 @@ ; RV64ZBS-LABEL: extractelt_v32i1: ; RV64ZBS: # %bb.0: ; RV64ZBS-NEXT: li a2, 32 -; RV64ZBS-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; RV64ZBS-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; RV64ZBS-NEXT: vle8.v v8, (a0) ; RV64ZBS-NEXT: vmseq.vi v10, v8, 0 -; RV64ZBS-NEXT: vsetivli zero, 0, e32, mf2, ta, 
mu +; RV64ZBS-NEXT: vsetivli zero, 0, e32, mf2, ta, ma ; RV64ZBS-NEXT: vmv.x.s a0, v10 ; RV64ZBS-NEXT: bext a0, a0, a1 ; RV64ZBS-NEXT: ret @@ -206,11 +206,11 @@ ; RV32-LABEL: extractelt_v64i1: ; RV32: # %bb.0: ; RV32-NEXT: li a2, 64 -; RV32-NEXT: vsetvli zero, a2, e8, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e8, m4, ta, ma ; RV32-NEXT: vle8.v v8, (a0) ; RV32-NEXT: vmseq.vi v12, v8, 0 ; RV32-NEXT: srli a0, a1, 5 -; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV32-NEXT: vslidedown.vx v8, v12, a0 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: srl a0, a0, a1 @@ -220,10 +220,10 @@ ; RV64-LABEL: extractelt_v64i1: ; RV64: # %bb.0: ; RV64-NEXT: li a2, 64 -; RV64-NEXT: vsetvli zero, a2, e8, m4, ta, mu +; RV64-NEXT: vsetvli zero, a2, e8, m4, ta, ma ; RV64-NEXT: vle8.v v8, (a0) ; RV64-NEXT: vmseq.vi v12, v8, 0 -; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV64-NEXT: vmv.x.s a0, v12 ; RV64-NEXT: srl a0, a0, a1 ; RV64-NEXT: andi a0, a0, 1 @@ -232,11 +232,11 @@ ; RV32ZBS-LABEL: extractelt_v64i1: ; RV32ZBS: # %bb.0: ; RV32ZBS-NEXT: li a2, 64 -; RV32ZBS-NEXT: vsetvli zero, a2, e8, m4, ta, mu +; RV32ZBS-NEXT: vsetvli zero, a2, e8, m4, ta, ma ; RV32ZBS-NEXT: vle8.v v8, (a0) ; RV32ZBS-NEXT: vmseq.vi v12, v8, 0 ; RV32ZBS-NEXT: srli a0, a1, 5 -; RV32ZBS-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV32ZBS-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV32ZBS-NEXT: vslidedown.vx v8, v12, a0 ; RV32ZBS-NEXT: vmv.x.s a0, v8 ; RV32ZBS-NEXT: bext a0, a0, a1 @@ -245,10 +245,10 @@ ; RV64ZBS-LABEL: extractelt_v64i1: ; RV64ZBS: # %bb.0: ; RV64ZBS-NEXT: li a2, 64 -; RV64ZBS-NEXT: vsetvli zero, a2, e8, m4, ta, mu +; RV64ZBS-NEXT: vsetvli zero, a2, e8, m4, ta, ma ; RV64ZBS-NEXT: vle8.v v8, (a0) ; RV64ZBS-NEXT: vmseq.vi v12, v8, 0 -; RV64ZBS-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV64ZBS-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV64ZBS-NEXT: vmv.x.s a0, v12 ; RV64ZBS-NEXT: bext a0, a0, a1 ; RV64ZBS-NEXT: ret @@ -262,11 +262,11 @@ ; RV32-LABEL: extractelt_v128i1: ; RV32: # %bb.0: ; RV32-NEXT: li a2, 128 -; RV32-NEXT: vsetvli zero, a2, e8, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e8, m8, ta, ma ; RV32-NEXT: vle8.v v8, (a0) ; RV32-NEXT: vmseq.vi v16, v8, 0 ; RV32-NEXT: srli a0, a1, 5 -; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32-NEXT: vslidedown.vx v8, v16, a0 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: srl a0, a0, a1 @@ -276,11 +276,11 @@ ; RV64-LABEL: extractelt_v128i1: ; RV64: # %bb.0: ; RV64-NEXT: li a2, 128 -; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, mu +; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, ma ; RV64-NEXT: vle8.v v8, (a0) ; RV64-NEXT: vmseq.vi v16, v8, 0 ; RV64-NEXT: srli a0, a1, 6 -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vslidedown.vx v8, v16, a0 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: srl a0, a0, a1 @@ -290,11 +290,11 @@ ; RV32ZBS-LABEL: extractelt_v128i1: ; RV32ZBS: # %bb.0: ; RV32ZBS-NEXT: li a2, 128 -; RV32ZBS-NEXT: vsetvli zero, a2, e8, m8, ta, mu +; RV32ZBS-NEXT: vsetvli zero, a2, e8, m8, ta, ma ; RV32ZBS-NEXT: vle8.v v8, (a0) ; RV32ZBS-NEXT: vmseq.vi v16, v8, 0 ; RV32ZBS-NEXT: srli a0, a1, 5 -; RV32ZBS-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32ZBS-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32ZBS-NEXT: vslidedown.vx v8, v16, a0 ; RV32ZBS-NEXT: vmv.x.s a0, v8 ; RV32ZBS-NEXT: bext a0, a0, a1 @@ -303,11 +303,11 @@ ; RV64ZBS-LABEL: extractelt_v128i1: ; RV64ZBS: # %bb.0: ; 
RV64ZBS-NEXT: li a2, 128 -; RV64ZBS-NEXT: vsetvli zero, a2, e8, m8, ta, mu +; RV64ZBS-NEXT: vsetvli zero, a2, e8, m8, ta, ma ; RV64ZBS-NEXT: vle8.v v8, (a0) ; RV64ZBS-NEXT: vmseq.vi v16, v8, 0 ; RV64ZBS-NEXT: srli a0, a1, 6 -; RV64ZBS-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64ZBS-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64ZBS-NEXT: vslidedown.vx v8, v16, a0 ; RV64ZBS-NEXT: vmv.x.s a0, v8 ; RV64ZBS-NEXT: bext a0, a0, a1 @@ -328,7 +328,7 @@ ; RV32-NEXT: andi sp, sp, -128 ; RV32-NEXT: andi a1, a1, 255 ; RV32-NEXT: li a2, 128 -; RV32-NEXT: vsetvli zero, a2, e8, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e8, m8, ta, ma ; RV32-NEXT: addi a2, a0, 128 ; RV32-NEXT: vle8.v v16, (a2) ; RV32-NEXT: vle8.v v24, (a0) @@ -359,7 +359,7 @@ ; RV64-NEXT: andi sp, sp, -128 ; RV64-NEXT: andi a1, a1, 255 ; RV64-NEXT: li a2, 128 -; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, mu +; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, ma ; RV64-NEXT: addi a2, a0, 128 ; RV64-NEXT: vle8.v v16, (a2) ; RV64-NEXT: vle8.v v24, (a0) @@ -390,7 +390,7 @@ ; RV32ZBS-NEXT: andi sp, sp, -128 ; RV32ZBS-NEXT: andi a1, a1, 255 ; RV32ZBS-NEXT: li a2, 128 -; RV32ZBS-NEXT: vsetvli zero, a2, e8, m8, ta, mu +; RV32ZBS-NEXT: vsetvli zero, a2, e8, m8, ta, ma ; RV32ZBS-NEXT: addi a2, a0, 128 ; RV32ZBS-NEXT: vle8.v v16, (a2) ; RV32ZBS-NEXT: vle8.v v24, (a0) @@ -421,7 +421,7 @@ ; RV64ZBS-NEXT: andi sp, sp, -128 ; RV64ZBS-NEXT: andi a1, a1, 255 ; RV64ZBS-NEXT: li a2, 128 -; RV64ZBS-NEXT: vsetvli zero, a2, e8, m8, ta, mu +; RV64ZBS-NEXT: vsetvli zero, a2, e8, m8, ta, ma ; RV64ZBS-NEXT: addi a2, a0, 128 ; RV64ZBS-NEXT: vle8.v v16, (a2) ; RV64ZBS-NEXT: vle8.v v24, (a0) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll @@ -5,9 +5,9 @@ define void @extract_v2i8_v4i8_0(<4 x i8>* %x, <2 x i8>* %y) { ; CHECK-LABEL: extract_v2i8_v4i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vse8.v v8, (a1) ; CHECK-NEXT: ret %a = load <4 x i8>, <4 x i8>* %x @@ -19,11 +19,11 @@ define void @extract_v2i8_v4i8_2(<4 x i8>* %x, <2 x i8>* %y) { ; CHECK-LABEL: extract_v2i8_v4i8_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vse8.v v8, (a1) ; CHECK-NEXT: ret %a = load <4 x i8>, <4 x i8>* %x @@ -35,9 +35,9 @@ define void @extract_v2i8_v8i8_0(<8 x i8>* %x, <2 x i8>* %y) { ; CHECK-LABEL: extract_v2i8_v8i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vse8.v v8, (a1) ; CHECK-NEXT: ret %a = load <8 x i8>, <8 x i8>* %x @@ -49,11 +49,11 @@ define void @extract_v2i8_v8i8_6(<8 x i8>* %x, <2 x i8>* %y) { ; CHECK-LABEL: extract_v2i8_v8i8_6: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, 
ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 6 -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vse8.v v8, (a1) ; CHECK-NEXT: ret %a = load <8 x i8>, <8 x i8>* %x @@ -65,17 +65,17 @@ define void @extract_v2i32_v8i32_0(<8 x i32>* %x, <2 x i32>* %y) { ; LMULMAX2-LABEL: extract_v2i32_v8i32_0: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v8, (a0) -; LMULMAX2-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX2-NEXT: vse32.v v8, (a1) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: extract_v2i32_v8i32_0: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vle32.v v8, (a0) -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vse32.v v8, (a1) ; LMULMAX1-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x @@ -87,21 +87,21 @@ define void @extract_v2i32_v8i32_2(<8 x i32>* %x, <2 x i32>* %y) { ; LMULMAX2-LABEL: extract_v2i32_v8i32_2: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v8, (a0) -; LMULMAX2-NEXT: vsetivli zero, 2, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 2, e32, m2, ta, ma ; LMULMAX2-NEXT: vslidedown.vi v8, v8, 2 -; LMULMAX2-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX2-NEXT: vse32.v v8, (a1) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: extract_v2i32_v8i32_2: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vle32.v v8, (a0) -; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vse32.v v8, (a1) ; LMULMAX1-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x @@ -113,22 +113,22 @@ define void @extract_v2i32_v8i32_6(<8 x i32>* %x, <2 x i32>* %y) { ; LMULMAX2-LABEL: extract_v2i32_v8i32_6: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v8, (a0) -; LMULMAX2-NEXT: vsetivli zero, 2, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 2, e32, m2, ta, ma ; LMULMAX2-NEXT: vslidedown.vi v8, v8, 6 -; LMULMAX2-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX2-NEXT: vse32.v v8, (a1) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: extract_v2i32_v8i32_6: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: addi a0, a0, 16 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vle32.v v8, (a0) -; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vse32.v v8, (a1) ; LMULMAX1-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x 
@@ -140,7 +140,7 @@ define void @extract_v2i32_nxv16i32_0( %x, <2 x i32>* %y) { ; CHECK-LABEL: extract_v2i32_nxv16i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret %c = call <2 x i32> @llvm.vector.extract.v2i32.nxv16i32( %x, i64 0) @@ -151,9 +151,9 @@ define void @extract_v2i32_nxv16i32_8( %x, <2 x i32>* %y) { ; CHECK-LABEL: extract_v2i32_nxv16i32_8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 6 -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret %c = call <2 x i32> @llvm.vector.extract.v2i32.nxv16i32( %x, i64 6) @@ -164,7 +164,7 @@ define void @extract_v2i8_nxv2i8_0( %x, <2 x i8>* %y) { ; CHECK-LABEL: extract_v2i8_nxv2i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret %c = call <2 x i8> @llvm.vector.extract.v2i8.nxv2i8( %x, i64 0) @@ -175,9 +175,9 @@ define void @extract_v2i8_nxv2i8_2( %x, <2 x i8>* %y) { ; CHECK-LABEL: extract_v2i8_nxv2i8_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret %c = call <2 x i8> @llvm.vector.extract.v2i8.nxv2i8( %x, i64 2) @@ -188,19 +188,19 @@ define void @extract_v8i32_nxv16i32_8( %x, <8 x i32>* %y) { ; LMULMAX2-LABEL: extract_v8i32_nxv16i32_8: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m8, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m8, ta, ma ; LMULMAX2-NEXT: vslidedown.vi v8, v8, 8 -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vse32.v v8, (a0) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: extract_v8i32_nxv16i32_8: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m8, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m8, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v16, v8, 8 ; LMULMAX1-NEXT: vslidedown.vi v8, v8, 12 ; LMULMAX1-NEXT: addi a1, a0, 16 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vse32.v v8, (a1) ; LMULMAX1-NEXT: vse32.v v16, (a0) ; LMULMAX1-NEXT: ret @@ -213,17 +213,17 @@ ; LMULMAX2-LABEL: extract_v8i1_v64i1_0: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: li a2, 32 -; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; LMULMAX2-NEXT: vlm.v v8, (a0) -; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX2-NEXT: vsm.v v8, (a1) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: extract_v8i1_v64i1_0: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: vlm.v v8, (a0) -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX1-NEXT: vsm.v v8, (a1) ; LMULMAX1-NEXT: ret %a = load <64 x i1>, <64 x i1>* %x @@ -236,21 +236,21 @@ ; LMULMAX2-LABEL: extract_v8i1_v64i1_8: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: li a2, 32 -; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu 
+; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; LMULMAX2-NEXT: vlm.v v8, (a0) -; LMULMAX2-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; LMULMAX2-NEXT: vslidedown.vi v8, v8, 1 -; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX2-NEXT: vsm.v v8, (a1) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: extract_v8i1_v64i1_8: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: vlm.v v8, (a0) -; LMULMAX1-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v8, 1 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX1-NEXT: vsm.v v8, (a1) ; LMULMAX1-NEXT: ret %a = load <64 x i1>, <64 x i1>* %x @@ -264,20 +264,20 @@ ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: addi a0, a0, 4 ; LMULMAX2-NEXT: li a2, 32 -; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; LMULMAX2-NEXT: vlm.v v8, (a0) -; LMULMAX2-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; LMULMAX2-NEXT: vslidedown.vi v8, v8, 2 -; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX2-NEXT: vsm.v v8, (a1) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: extract_v8i1_v64i1_48: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: addi a0, a0, 6 -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: vlm.v v8, (a0) -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX1-NEXT: vsm.v v8, (a1) ; LMULMAX1-NEXT: ret %a = load <64 x i1>, <64 x i1>* %x @@ -289,7 +289,7 @@ define void @extract_v8i1_nxv2i1_0( %x, <8 x i1>* %y) { ; CHECK-LABEL: extract_v8i1_nxv2i1_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vsm.v v0, (a0) ; CHECK-NEXT: ret %c = call <8 x i1> @llvm.vector.extract.v8i1.nxv2i1( %x, i64 0) @@ -300,7 +300,7 @@ define void @extract_v8i1_nxv64i1_0( %x, <8 x i1>* %y) { ; CHECK-LABEL: extract_v8i1_nxv64i1_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vsm.v v0, (a0) ; CHECK-NEXT: ret %c = call <8 x i1> @llvm.vector.extract.v8i1.nxv64i1( %x, i64 0) @@ -311,9 +311,9 @@ define void @extract_v8i1_nxv64i1_8( %x, <8 x i1>* %y) { ; CHECK-LABEL: extract_v8i1_nxv64i1_8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v0, 1 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vsm.v v8, (a0) ; CHECK-NEXT: ret %c = call <8 x i1> @llvm.vector.extract.v8i1.nxv64i1( %x, i64 8) @@ -324,9 +324,9 @@ define void @extract_v8i1_nxv64i1_48( %x, <8 x i1>* %y) { ; CHECK-LABEL: extract_v8i1_nxv64i1_48: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v0, 6 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vsm.v v8, (a0) ; CHECK-NEXT: ret %c = call <8 x i1> @llvm.vector.extract.v8i1.nxv64i1( %x, i64 48) @@ -339,32 +339,32 @@ ; 
LMULMAX2-LABEL: extract_v2i1_v64i1_0: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: li a2, 32 -; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; LMULMAX2-NEXT: vlm.v v0, (a0) -; LMULMAX2-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; LMULMAX2-NEXT: vmv.v.i v8, 0 ; LMULMAX2-NEXT: vmerge.vim v8, v8, 1, v0 -; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX2-NEXT: vmv.v.i v9, 0 -; LMULMAX2-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; LMULMAX2-NEXT: vsetivli zero, 2, e8, mf2, tu, ma ; LMULMAX2-NEXT: vslideup.vi v9, v8, 0 -; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX2-NEXT: vmsne.vi v8, v9, 0 ; LMULMAX2-NEXT: vsm.v v8, (a1) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: extract_v2i1_v64i1_0: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: vlm.v v0, (a0) -; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; LMULMAX1-NEXT: vmv.v.i v8, 0 ; LMULMAX1-NEXT: vmerge.vim v8, v8, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX1-NEXT: vmv.v.i v9, 0 -; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf2, tu, ma ; LMULMAX1-NEXT: vslideup.vi v9, v8, 0 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX1-NEXT: vmsne.vi v8, v9, 0 ; LMULMAX1-NEXT: vsm.v v8, (a1) ; LMULMAX1-NEXT: ret @@ -378,42 +378,42 @@ ; LMULMAX2-LABEL: extract_v2i1_v64i1_2: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: li a2, 32 -; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; LMULMAX2-NEXT: vlm.v v0, (a0) ; LMULMAX2-NEXT: vmv.v.i v8, 0 ; LMULMAX2-NEXT: vmerge.vim v8, v8, 1, v0 -; LMULMAX2-NEXT: vsetivli zero, 2, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 2, e8, m2, ta, ma ; LMULMAX2-NEXT: vslidedown.vi v8, v8, 2 -; LMULMAX2-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; LMULMAX2-NEXT: vmsne.vi v0, v8, 0 ; LMULMAX2-NEXT: vmv.v.i v8, 0 ; LMULMAX2-NEXT: vmerge.vim v8, v8, 1, v0 -; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX2-NEXT: vmv.v.i v9, 0 -; LMULMAX2-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; LMULMAX2-NEXT: vsetivli zero, 2, e8, mf2, tu, ma ; LMULMAX2-NEXT: vslideup.vi v9, v8, 0 -; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX2-NEXT: vmsne.vi v8, v9, 0 ; LMULMAX2-NEXT: vsm.v v8, (a1) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: extract_v2i1_v64i1_2: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: vlm.v v0, (a0) ; LMULMAX1-NEXT: vmv.v.i v8, 0 ; LMULMAX1-NEXT: vmerge.vim v8, v8, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 2, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e8, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; LMULMAX1-NEXT: vmsne.vi v0, v8, 0 ; LMULMAX1-NEXT: vmv.v.i v8, 0 ; LMULMAX1-NEXT: vmerge.vim v8, v8, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; 
LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX1-NEXT: vmv.v.i v9, 0 -; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf2, tu, ma ; LMULMAX1-NEXT: vslideup.vi v9, v8, 0 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX1-NEXT: vmsne.vi v8, v9, 0 ; LMULMAX1-NEXT: vsm.v v8, (a1) ; LMULMAX1-NEXT: ret @@ -428,21 +428,21 @@ ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: addi a0, a0, 4 ; LMULMAX2-NEXT: li a2, 32 -; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; LMULMAX2-NEXT: vlm.v v0, (a0) ; LMULMAX2-NEXT: vmv.v.i v8, 0 ; LMULMAX2-NEXT: vmerge.vim v8, v8, 1, v0 -; LMULMAX2-NEXT: vsetivli zero, 2, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 2, e8, m2, ta, ma ; LMULMAX2-NEXT: vslidedown.vi v8, v8, 10 -; LMULMAX2-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; LMULMAX2-NEXT: vmsne.vi v0, v8, 0 ; LMULMAX2-NEXT: vmv.v.i v8, 0 ; LMULMAX2-NEXT: vmerge.vim v8, v8, 1, v0 -; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX2-NEXT: vmv.v.i v9, 0 -; LMULMAX2-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; LMULMAX2-NEXT: vsetivli zero, 2, e8, mf2, tu, ma ; LMULMAX2-NEXT: vslideup.vi v9, v8, 0 -; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX2-NEXT: vmsne.vi v8, v9, 0 ; LMULMAX2-NEXT: vsm.v v8, (a1) ; LMULMAX2-NEXT: ret @@ -450,21 +450,21 @@ ; LMULMAX1-LABEL: extract_v2i1_v64i1_42: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: addi a0, a0, 4 -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: vlm.v v0, (a0) ; LMULMAX1-NEXT: vmv.v.i v8, 0 ; LMULMAX1-NEXT: vmerge.vim v8, v8, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 2, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e8, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v8, v8, 10 -; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; LMULMAX1-NEXT: vmsne.vi v0, v8, 0 ; LMULMAX1-NEXT: vmv.v.i v8, 0 ; LMULMAX1-NEXT: vmerge.vim v8, v8, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX1-NEXT: vmv.v.i v9, 0 -; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf2, tu, ma ; LMULMAX1-NEXT: vslideup.vi v9, v8, 0 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX1-NEXT: vmsne.vi v8, v9, 0 ; LMULMAX1-NEXT: vsm.v v8, (a1) ; LMULMAX1-NEXT: ret @@ -477,14 +477,14 @@ define void @extract_v2i1_nxv2i1_0( %x, <2 x i1>* %y) { ; CHECK-LABEL: extract_v2i1_nxv2i1_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a0) ; CHECK-NEXT: ret @@ -496,20 +496,20 @@ define void @extract_v2i1_nxv2i1_2( %x, <2 x i1>* %y) { ; CHECK-LABEL: 
extract_v2i1_nxv2i1_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a0) ; CHECK-NEXT: ret @@ -521,14 +521,14 @@ define void @extract_v2i1_nxv64i1_0( %x, <2 x i1>* %y) { ; CHECK-LABEL: extract_v2i1_nxv64i1_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a0) ; CHECK-NEXT: ret @@ -540,20 +540,20 @@ define void @extract_v2i1_nxv64i1_2( %x, <2 x i1>* %y) { ; CHECK-LABEL: extract_v2i1_nxv64i1_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 2, e8, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a0) ; CHECK-NEXT: ret @@ -565,21 +565,21 @@ define void @extract_v2i1_nxv64i1_42( %x, <2 x i1>* %y) { ; CHECK-LABEL: extract_v2i1_nxv64i1_42: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: li a1, 42 -; CHECK-NEXT: vsetivli zero, 2, e8, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a1 -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; 
CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a0) ; CHECK-NEXT: ret @@ -591,20 +591,20 @@ define void @extract_v2i1_nxv32i1_26( %x, <2 x i1>* %y) { ; CHECK-LABEL: extract_v2i1_nxv32i1_26: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 2, e8, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, m4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 26 -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a0) ; CHECK-NEXT: ret @@ -616,9 +616,9 @@ define void @extract_v8i1_nxv32i1_16( %x, <8 x i1>* %y) { ; CHECK-LABEL: extract_v8i1_nxv32i1_16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v0, 2 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vsm.v v8, (a0) ; CHECK-NEXT: ret %c = call <8 x i1> @llvm.vector.extract.v8i1.nxv32i1( %x, i64 16) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll @@ -7,9 +7,9 @@ define i8 @extractelt_v16i8(<16 x i8>* %x) nounwind { ; CHECK-LABEL: extractelt_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 7 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -21,9 +21,9 @@ define i16 @extractelt_v8i16(<8 x i16>* %x) nounwind { ; CHECK-LABEL: extractelt_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 7 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -35,9 +35,9 @@ define i32 @extractelt_v4i32(<4 x i32>* %x) nounwind { ; CHECK-LABEL: extractelt_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -49,10 +49,10 @@ define i64 @extractelt_v2i64(<2 x i64>* %x) nounwind { ; RV32-LABEL: extractelt_v2i64: ; RV32: # 
%bb.0: -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: li a0, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v9, v8, a0 ; RV32-NEXT: vmv.x.s a1, v9 ; RV32-NEXT: vmv.x.s a0, v8 @@ -60,7 +60,7 @@ ; ; RV64-LABEL: extractelt_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -72,9 +72,9 @@ define half @extractelt_v8f16(<8 x half>* %x) nounwind { ; CHECK-LABEL: extractelt_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 7 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -86,9 +86,9 @@ define float @extractelt_v4f32(<4 x float>* %x) nounwind { ; CHECK-LABEL: extractelt_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -100,7 +100,7 @@ define double @extractelt_v2f64(<2 x double>* %x) nounwind { ; CHECK-LABEL: extractelt_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -113,9 +113,9 @@ ; CHECK-LABEL: extractelt_v32i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 7 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -127,9 +127,9 @@ define i16 @extractelt_v16i16(<16 x i16>* %x) nounwind { ; CHECK-LABEL: extractelt_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 7 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -141,9 +141,9 @@ define i32 @extractelt_v8i32(<8 x i32>* %x) nounwind { ; CHECK-LABEL: extractelt_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 6 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -155,9 +155,9 @@ define i64 @extractelt_v4i64(<4 x i64>* %x) nounwind { ; RV32-LABEL: extractelt_v4i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma ; RV32-NEXT: vslidedown.vi v8, v8, 3 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 @@ -167,9 +167,9 @@ ; ; RV64-LABEL: extractelt_v4i64: ; RV64: # 
%bb.0: -; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma ; RV64-NEXT: vslidedown.vi v8, v8, 3 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -181,9 +181,9 @@ define half @extractelt_v16f16(<16 x half>* %x) nounwind { ; CHECK-LABEL: extractelt_v16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 7 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -195,9 +195,9 @@ define float @extractelt_v8f32(<8 x float>* %x) nounwind { ; CHECK-LABEL: extractelt_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -209,7 +209,7 @@ define double @extractelt_v4f64(<4 x double>* %x) nounwind { ; CHECK-LABEL: extractelt_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -225,9 +225,9 @@ define i64 @extractelt_v3i64(<3 x i64>* %x) nounwind { ; RV32-LABEL: extractelt_v3i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vle32.v v8, (a0) -; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32-NEXT: vslidedown.vi v10, v8, 4 ; RV32-NEXT: vmv.x.s a0, v10 ; RV32-NEXT: vslidedown.vi v8, v8, 5 @@ -236,9 +236,9 @@ ; ; RV64-LABEL: extractelt_v3i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma ; RV64-NEXT: vslidedown.vi v8, v8, 2 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -250,9 +250,9 @@ define i8 @extractelt_v16i8_idx(<16 x i8>* %x, i32 signext %idx) nounwind { ; CHECK-LABEL: extractelt_v16i8_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -264,9 +264,9 @@ define i16 @extractelt_v8i16_idx(<8 x i16>* %x, i32 signext %idx) nounwind { ; CHECK-LABEL: extractelt_v8i16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -278,10 +278,10 @@ define i32 @extractelt_v4i32_idx(<4 x i32>* %x, i32 signext %idx) nounwind { ; CHECK-LABEL: extractelt_v4i32_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; 
CHECK-NEXT: vadd.vv v8, v8, v8 -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -294,10 +294,10 @@ define i64 @extractelt_v2i64_idx(<2 x i64>* %x, i32 signext %idx) nounwind { ; RV32-LABEL: extractelt_v2i64_idx: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: vadd.vv v8, v8, v8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vslidedown.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 @@ -307,10 +307,10 @@ ; ; RV64-LABEL: extractelt_v2i64_idx: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vadd.vv v8, v8, v8 -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vslidedown.vx v8, v8, a1 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -323,10 +323,10 @@ define half @extractelt_v8f16_idx(<8 x half>* %x, i32 signext %idx) nounwind { ; CHECK-LABEL: extractelt_v8f16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfadd.vv v8, v8, v8 -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a1 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -339,10 +339,10 @@ define float @extractelt_v4f32_idx(<4 x float>* %x, i32 signext %idx) nounwind { ; CHECK-LABEL: extractelt_v4f32_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfadd.vv v8, v8, v8 -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a1 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -355,10 +355,10 @@ define double @extractelt_v2f64_idx(<2 x double>* %x, i32 signext %idx) nounwind { ; CHECK-LABEL: extractelt_v2f64_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfadd.vv v8, v8, v8 -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a1 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -372,9 +372,9 @@ ; CHECK-LABEL: extractelt_v32i8_idx: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -386,9 +386,9 @@ define i16 @extractelt_v16i16_idx(<16 x i16>* %x, i32 signext %idx) nounwind { ; CHECK-LABEL: extractelt_v16i16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -400,10 +400,10 @@ define i32 
@extractelt_v8i32_idx(<8 x i32>* %x, i32 signext %idx) nounwind { ; CHECK-LABEL: extractelt_v8i32_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vadd.vv v8, v8, v8 -; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a1 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -416,10 +416,10 @@ define i64 @extractelt_v4i64_idx(<4 x i64>* %x, i32 signext %idx) nounwind { ; RV32-LABEL: extractelt_v4i64_idx: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: vadd.vv v8, v8, v8 -; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma ; RV32-NEXT: vslidedown.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 @@ -429,10 +429,10 @@ ; ; RV64-LABEL: extractelt_v4i64_idx: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vadd.vv v8, v8, v8 -; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma ; RV64-NEXT: vslidedown.vx v8, v8, a1 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -445,10 +445,10 @@ define half @extractelt_v16f16_idx(<16 x half>* %x, i32 signext %idx) nounwind { ; CHECK-LABEL: extractelt_v16f16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfadd.vv v8, v8, v8 -; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a1 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -461,10 +461,10 @@ define float @extractelt_v8f32_idx(<8 x float>* %x, i32 signext %idx) nounwind { ; CHECK-LABEL: extractelt_v8f32_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfadd.vv v8, v8, v8 -; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a1 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -477,10 +477,10 @@ define double @extractelt_v4f64_idx(<4 x double>* %x, i32 signext %idx) nounwind { ; CHECK-LABEL: extractelt_v4f64_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfadd.vv v8, v8, v8 -; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a1 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -497,11 +497,11 @@ define i64 @extractelt_v3i64_idx(<3 x i64>* %x, i32 signext %idx) nounwind { ; RV32-LABEL: extractelt_v3i64_idx: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: vadd.vv v8, v8, v8 ; RV32-NEXT: add a1, a1, a1 -; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32-NEXT: vslidedown.vx v10, v8, a1 ; RV32-NEXT: vmv.x.s a0, v10 ; RV32-NEXT: addi a1, a1, 1 @@ -511,10 +511,10 @@ ; ; RV64-LABEL: extractelt_v3i64_idx: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, 
mu +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vadd.vv v8, v8, v8 -; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma ; RV64-NEXT: vslidedown.vx v8, v8, a1 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -527,9 +527,9 @@ define void @store_extractelt_v16i8(<16 x i8>* %x, i8* %p) nounwind { ; CHECK-LABEL: store_extractelt_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 7 ; CHECK-NEXT: vse8.v v8, (a1) ; CHECK-NEXT: ret @@ -542,9 +542,9 @@ define void @store_extractelt_v8i16(<8 x i16>* %x, i16* %p) nounwind { ; CHECK-LABEL: store_extractelt_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 7 ; CHECK-NEXT: vse16.v v8, (a1) ; CHECK-NEXT: ret @@ -557,9 +557,9 @@ define void @store_extractelt_v4i32(<4 x i32>* %x, i32* %p) nounwind { ; CHECK-LABEL: store_extractelt_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vse32.v v8, (a1) ; CHECK-NEXT: ret @@ -573,9 +573,9 @@ define void @store_extractelt_v2i64(<2 x i64>* %x, i64* %p) nounwind { ; RV32-LABEL: store_extractelt_v2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vslidedown.vi v8, v8, 1 ; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsrl.vx v9, v8, a0 @@ -587,9 +587,9 @@ ; ; RV64-LABEL: store_extractelt_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vslidedown.vi v8, v8, 1 ; RV64-NEXT: vse64.v v8, (a1) ; RV64-NEXT: ret @@ -602,9 +602,9 @@ define void @store_extractelt_v2f64(<2 x double>* %x, double* %p) nounwind { ; CHECK-LABEL: store_extractelt_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 1 ; CHECK-NEXT: vse64.v v8, (a1) ; CHECK-NEXT: ret @@ -617,7 +617,7 @@ define i32 @extractelt_add_v4i32(<4 x i32> %x) { ; RV32-LABEL: extractelt_add_v4i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32-NEXT: vslidedown.vi v8, v8, 2 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: addi a0, a0, 13 @@ -625,9 +625,9 @@ ; ; RV64-LABEL: extractelt_add_v4i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64-NEXT: vadd.vi v8, v8, 13 -; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; 
RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64-NEXT: vslidedown.vi v8, v8, 2 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -639,7 +639,7 @@ define i32 @extractelt_sub_v4i32(<4 x i32> %x) { ; RV32-LABEL: extractelt_sub_v4i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32-NEXT: vslidedown.vi v8, v8, 2 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 13 @@ -648,9 +648,9 @@ ; ; RV64-LABEL: extractelt_sub_v4i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64-NEXT: vrsub.vi v8, v8, 13 -; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64-NEXT: vslidedown.vi v8, v8, 2 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -663,16 +663,16 @@ ; RV32NOM-LABEL: extractelt_mul_v4i32: ; RV32NOM: # %bb.0: ; RV32NOM-NEXT: li a0, 13 -; RV32NOM-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32NOM-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32NOM-NEXT: vmul.vx v8, v8, a0 -; RV32NOM-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32NOM-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32NOM-NEXT: vslidedown.vi v8, v8, 2 ; RV32NOM-NEXT: vmv.x.s a0, v8 ; RV32NOM-NEXT: ret ; ; RV32M-LABEL: extractelt_mul_v4i32: ; RV32M: # %bb.0: -; RV32M-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32M-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32M-NEXT: vslidedown.vi v8, v8, 2 ; RV32M-NEXT: vmv.x.s a0, v8 ; RV32M-NEXT: li a1, 13 @@ -682,9 +682,9 @@ ; RV64-LABEL: extractelt_mul_v4i32: ; RV64: # %bb.0: ; RV64-NEXT: li a0, 13 -; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64-NEXT: vmul.vx v8, v8, a0 -; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64-NEXT: vslidedown.vi v8, v8, 2 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -697,12 +697,12 @@ ; RV32NOM-LABEL: extractelt_sdiv_v4i32: ; RV32NOM: # %bb.0: ; RV32NOM-NEXT: li a0, -1 -; RV32NOM-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32NOM-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32NOM-NEXT: vmv.s.x v9, a0 ; RV32NOM-NEXT: vmv.v.i v10, 0 -; RV32NOM-NEXT: vsetvli zero, zero, e32, m1, tu, mu +; RV32NOM-NEXT: vsetvli zero, zero, e32, m1, tu, ma ; RV32NOM-NEXT: vslideup.vi v10, v9, 3 -; RV32NOM-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV32NOM-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV32NOM-NEXT: lui a0, %hi(.LCPI38_0) ; RV32NOM-NEXT: addi a0, a0, %lo(.LCPI38_0) ; RV32NOM-NEXT: vle32.v v9, (a0) @@ -715,14 +715,14 @@ ; RV32NOM-NEXT: vsra.vv v9, v8, v11 ; RV32NOM-NEXT: vsrl.vi v8, v8, 31 ; RV32NOM-NEXT: vadd.vv v8, v9, v8 -; RV32NOM-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32NOM-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32NOM-NEXT: vslidedown.vi v8, v8, 2 ; RV32NOM-NEXT: vmv.x.s a0, v8 ; RV32NOM-NEXT: ret ; ; RV32M-LABEL: extractelt_sdiv_v4i32: ; RV32M: # %bb.0: -; RV32M-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32M-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32M-NEXT: vslidedown.vi v8, v8, 2 ; RV32M-NEXT: vmv.x.s a0, v8 ; RV32M-NEXT: lui a1, 322639 @@ -736,12 +736,12 @@ ; RV64-LABEL: extractelt_sdiv_v4i32: ; RV64: # %bb.0: ; RV64-NEXT: li a0, -1 -; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vmv.v.i v10, 0 -; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, mu +; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, ma ; RV64-NEXT: vslideup.vi v10, v9, 3 -; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; 
RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64-NEXT: lui a0, %hi(.LCPI38_0) ; RV64-NEXT: addi a0, a0, %lo(.LCPI38_0) ; RV64-NEXT: vle32.v v9, (a0) @@ -754,7 +754,7 @@ ; RV64-NEXT: vsra.vv v8, v8, v11 ; RV64-NEXT: vsrl.vi v9, v8, 31 ; RV64-NEXT: vadd.vv v8, v8, v9 -; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64-NEXT: vslidedown.vi v8, v8, 2 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -766,12 +766,12 @@ define i32 @extractelt_udiv_v4i32(<4 x i32> %x) { ; RV32NOM-LABEL: extractelt_udiv_v4i32: ; RV32NOM: # %bb.0: -; RV32NOM-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32NOM-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32NOM-NEXT: vsrl.vi v8, v8, 0 ; RV32NOM-NEXT: lui a0, 322639 ; RV32NOM-NEXT: addi a0, a0, -945 ; RV32NOM-NEXT: vmulhu.vx v8, v8, a0 -; RV32NOM-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32NOM-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32NOM-NEXT: vslidedown.vi v8, v8, 2 ; RV32NOM-NEXT: vmv.x.s a0, v8 ; RV32NOM-NEXT: srli a0, a0, 2 @@ -779,7 +779,7 @@ ; ; RV32M-LABEL: extractelt_udiv_v4i32: ; RV32M: # %bb.0: -; RV32M-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32M-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32M-NEXT: vslidedown.vi v8, v8, 2 ; RV32M-NEXT: vmv.x.s a0, v8 ; RV32M-NEXT: lui a1, 322639 @@ -790,13 +790,13 @@ ; ; RV64-LABEL: extractelt_udiv_v4i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64-NEXT: vsrl.vi v8, v8, 0 ; RV64-NEXT: lui a0, 322639 ; RV64-NEXT: addiw a0, a0, -945 ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 2 -; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64-NEXT: vslidedown.vi v8, v8, 2 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -810,7 +810,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI40_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI40_0)(a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s ft1, v8 ; CHECK-NEXT: fadd.s fa0, ft1, ft0 @@ -825,7 +825,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI41_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI41_0)(a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s ft1, v8 ; CHECK-NEXT: fsub.s fa0, ft0, ft1 @@ -840,7 +840,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI42_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI42_0)(a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s ft1, v8 ; CHECK-NEXT: fmul.s fa0, ft1, ft0 @@ -855,7 +855,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI43_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI43_0)(a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 ; CHECK-NEXT: vfmv.f.s ft1, v8 ; CHECK-NEXT: fdiv.s fa0, ft1, ft0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll @@ -29,7 +29,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI1_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI1_0)(a1) -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmset.m 
v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfabs.v v9, v8, v0.t @@ -71,7 +71,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI3_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI3_0)(a1) -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfabs.v v9, v8, v0.t @@ -113,7 +113,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI5_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI5_0)(a1) -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfabs.v v9, v8, v0.t @@ -157,7 +157,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI7_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI7_0)(a1) -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmset.m v10 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmv1r.v v0, v10 @@ -201,7 +201,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI9_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI9_0)(a1) -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfabs.v v9, v8, v0.t @@ -243,7 +243,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI11_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI11_0)(a1) -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfabs.v v9, v8, v0.t @@ -287,7 +287,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI13_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI13_0)(a1) -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmset.m v10 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmv1r.v v0, v10 @@ -333,7 +333,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI15_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI15_0)(a1) -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmset.m v12 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmv1r.v v0, v12 @@ -377,7 +377,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI17_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI17_0)(a1) -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfabs.v v9, v8, v0.t @@ -421,7 +421,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI19_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI19_0)(a1) -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmset.m v10 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmv1r.v v0, v10 @@ -467,7 +467,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI21_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI21_0)(a1) -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmset.m v12 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmv1r.v v0, v12 @@ -513,7 +513,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI23_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI23_0)(a1) -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmset.m v16 ; CHECK-NEXT: vsetvli 
zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v16 @@ -559,7 +559,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI25_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI25_0)(a1) -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmset.m v16 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v16 @@ -585,7 +585,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v1, v0 ; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: addi a2, a0, -16 ; CHECK-NEXT: vslidedown.vi v2, v0, 2 ; CHECK-NEXT: bltu a0, a2, .LBB26_2 @@ -645,7 +645,7 @@ ; CHECK-LABEL: vp_floor_v32f64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: addi a2, a0, -16 ; CHECK-NEXT: vmset.m v1 ; CHECK-NEXT: bltu a0, a2, .LBB27_2 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll @@ -9,7 +9,7 @@ define i16 @bitcast_v1f16_i16(<1 x half> %a) { ; CHECK-LABEL: bitcast_v1f16_i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %b = bitcast <1 x half> %a to i16 @@ -19,7 +19,7 @@ define half @bitcast_v1f16_f16(<1 x half> %a) { ; CHECK-LABEL: bitcast_v1f16_f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %b = bitcast <1 x half> %a to half @@ -29,7 +29,7 @@ define i32 @bitcast_v2f16_i32(<2 x half> %a) { ; CHECK-LABEL: bitcast_v2f16_i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %b = bitcast <2 x half> %a to i32 @@ -39,7 +39,7 @@ define i32 @bitcast_v1f32_i32(<1 x float> %a) { ; CHECK-LABEL: bitcast_v1f32_i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %b = bitcast <1 x float> %a to i32 @@ -49,7 +49,7 @@ define float @bitcast_v2f16_f32(<2 x half> %a) { ; CHECK-LABEL: bitcast_v2f16_f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %b = bitcast <2 x half> %a to float @@ -59,7 +59,7 @@ define float @bitcast_v1f32_f32(<1 x float> %a) { ; CHECK-LABEL: bitcast_v1f32_f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %b = bitcast <1 x float> %a to float @@ -70,7 +70,7 @@ ; RV32-FP-LABEL: bitcast_v4f16_i64: ; RV32-FP: # %bb.0: ; RV32-FP-NEXT: li a0, 32 -; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-FP-NEXT: vsrl.vx v9, v8, a0 ; RV32-FP-NEXT: vmv.x.s a1, v9 ; RV32-FP-NEXT: vmv.x.s a0, v8 @@ -78,7 +78,7 @@ ; ; RV64-FP-LABEL: bitcast_v4f16_i64: ; RV64-FP: # %bb.0: -; RV64-FP-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV64-FP-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV64-FP-NEXT: vmv.x.s a0, v8 ; RV64-FP-NEXT: ret %b = 
bitcast <4 x half> %a to i64 @@ -89,7 +89,7 @@ ; RV32-FP-LABEL: bitcast_v2f32_i64: ; RV32-FP: # %bb.0: ; RV32-FP-NEXT: li a0, 32 -; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-FP-NEXT: vsrl.vx v9, v8, a0 ; RV32-FP-NEXT: vmv.x.s a1, v9 ; RV32-FP-NEXT: vmv.x.s a0, v8 @@ -97,7 +97,7 @@ ; ; RV64-FP-LABEL: bitcast_v2f32_i64: ; RV64-FP: # %bb.0: -; RV64-FP-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV64-FP-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV64-FP-NEXT: vmv.x.s a0, v8 ; RV64-FP-NEXT: ret %b = bitcast <2 x float> %a to i64 @@ -108,7 +108,7 @@ ; RV32-FP-LABEL: bitcast_v1f64_i64: ; RV32-FP: # %bb.0: ; RV32-FP-NEXT: li a0, 32 -; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-FP-NEXT: vsrl.vx v9, v8, a0 ; RV32-FP-NEXT: vmv.x.s a1, v9 ; RV32-FP-NEXT: vmv.x.s a0, v8 @@ -116,7 +116,7 @@ ; ; RV64-FP-LABEL: bitcast_v1f64_i64: ; RV64-FP: # %bb.0: -; RV64-FP-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV64-FP-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV64-FP-NEXT: vmv.x.s a0, v8 ; RV64-FP-NEXT: ret %b = bitcast <1 x double> %a to i64 @@ -126,7 +126,7 @@ define double @bitcast_v4f16_f64(<4 x half> %a) { ; CHECK-LABEL: bitcast_v4f16_f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %b = bitcast <4 x half> %a to double @@ -136,7 +136,7 @@ define double @bitcast_v2f32_f64(<2 x float> %a) { ; CHECK-LABEL: bitcast_v2f32_f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %b = bitcast <2 x float> %a to double @@ -146,7 +146,7 @@ define double @bitcast_v1f64_f64(<1 x double> %a) { ; CHECK-LABEL: bitcast_v1f64_f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %b = bitcast <1 x double> %a to double @@ -156,7 +156,7 @@ define <1 x half> @bitcast_i16_v1f16(i16 %a) { ; CHECK-LABEL: bitcast_i16_v1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret %b = bitcast i16 %a to <1 x half> @@ -166,13 +166,13 @@ define <2 x half> @bitcast_i32_v2f16(i32 %a) { ; RV32-FP-LABEL: bitcast_i32_v2f16: ; RV32-FP: # %bb.0: -; RV32-FP-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV32-FP-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV32-FP-NEXT: vmv.s.x v8, a0 ; RV32-FP-NEXT: ret ; ; RV64-FP-LABEL: bitcast_i32_v2f16: ; RV64-FP: # %bb.0: -; RV64-FP-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV64-FP-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV64-FP-NEXT: vmv.v.x v8, a0 ; RV64-FP-NEXT: ret %b = bitcast i32 %a to <2 x half> @@ -182,13 +182,13 @@ define <1 x float> @bitcast_i32_v1f32(i32 %a) { ; RV32-FP-LABEL: bitcast_i32_v1f32: ; RV32-FP: # %bb.0: -; RV32-FP-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV32-FP-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV32-FP-NEXT: vmv.s.x v8, a0 ; RV32-FP-NEXT: ret ; ; RV64-FP-LABEL: bitcast_i32_v1f32: ; RV64-FP: # %bb.0: -; RV64-FP-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV64-FP-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV64-FP-NEXT: vmv.v.x v8, a0 ; RV64-FP-NEXT: ret %b = bitcast i32 %a to <1 x float> @@ -198,17 +198,17 @@ define <4 x half> @bitcast_i64_v4f16(i64 %a) { ; RV32-FP-LABEL: bitcast_i64_v4f16: ; 
RV32-FP: # %bb.0: -; RV32-FP-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV32-FP-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV32-FP-NEXT: vmv.v.i v8, 0 ; RV32-FP-NEXT: vslide1up.vx v9, v8, a1 ; RV32-FP-NEXT: vslide1up.vx v10, v9, a0 -; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, tu, mu +; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, tu, ma ; RV32-FP-NEXT: vslideup.vi v8, v10, 0 ; RV32-FP-NEXT: ret ; ; RV64-FP-LABEL: bitcast_i64_v4f16: ; RV64-FP: # %bb.0: -; RV64-FP-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-FP-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-FP-NEXT: vmv.s.x v8, a0 ; RV64-FP-NEXT: ret %b = bitcast i64 %a to <4 x half> @@ -218,17 +218,17 @@ define <2 x float> @bitcast_i64_v2f32(i64 %a) { ; RV32-FP-LABEL: bitcast_i64_v2f32: ; RV32-FP: # %bb.0: -; RV32-FP-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV32-FP-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV32-FP-NEXT: vmv.v.i v8, 0 ; RV32-FP-NEXT: vslide1up.vx v9, v8, a1 ; RV32-FP-NEXT: vslide1up.vx v10, v9, a0 -; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, tu, mu +; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, tu, ma ; RV32-FP-NEXT: vslideup.vi v8, v10, 0 ; RV32-FP-NEXT: ret ; ; RV64-FP-LABEL: bitcast_i64_v2f32: ; RV64-FP: # %bb.0: -; RV64-FP-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-FP-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-FP-NEXT: vmv.s.x v8, a0 ; RV64-FP-NEXT: ret %b = bitcast i64 %a to <2 x float> @@ -238,17 +238,17 @@ define <1 x double> @bitcast_i64_v1f64(i64 %a) { ; RV32-FP-LABEL: bitcast_i64_v1f64: ; RV32-FP: # %bb.0: -; RV32-FP-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV32-FP-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV32-FP-NEXT: vmv.v.i v8, 0 ; RV32-FP-NEXT: vslide1up.vx v9, v8, a1 ; RV32-FP-NEXT: vslide1up.vx v10, v9, a0 -; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, tu, mu +; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, tu, ma ; RV32-FP-NEXT: vslideup.vi v8, v10, 0 ; RV32-FP-NEXT: ret ; ; RV64-FP-LABEL: bitcast_i64_v1f64: ; RV64-FP: # %bb.0: -; RV64-FP-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-FP-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-FP-NEXT: vmv.s.x v8, a0 ; RV64-FP-NEXT: ret %b = bitcast i64 %a to <1 x double> @@ -258,7 +258,7 @@ define <1 x i16> @bitcast_f16_v1i16(half %a) { ; CHECK-LABEL: bitcast_f16_v1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret %b = bitcast half %a to <1 x i16> @@ -268,7 +268,7 @@ define <1 x half> @bitcast_f16_v1f16(half %a) { ; CHECK-LABEL: bitcast_f16_v1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret %b = bitcast half %a to <1 x half> @@ -278,7 +278,7 @@ define <2 x i16> @bitcast_f32_v2i16(float %a) { ; CHECK-LABEL: bitcast_f32_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret %b = bitcast float %a to <2 x i16> @@ -288,7 +288,7 @@ define <2 x half> @bitcast_f32_v2f16(float %a) { ; CHECK-LABEL: bitcast_f32_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret %b = bitcast float %a to <2 x half> @@ -298,7 +298,7 @@ define <1 x i32> @bitcast_f32_v1i32(float %a) { ; CHECK-LABEL: bitcast_f32_v1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, 
mf2, ta, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret %b = bitcast float %a to <1 x i32> @@ -308,7 +308,7 @@ define <1 x float> @bitcast_f32_v1f32(float %a) { ; CHECK-LABEL: bitcast_f32_v1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret %b = bitcast float %a to <1 x float> @@ -318,7 +318,7 @@ define <4 x i16> @bitcast_f64_v4i16(double %a) { ; CHECK-LABEL: bitcast_f64_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret %b = bitcast double %a to <4 x i16> @@ -328,7 +328,7 @@ define <4 x half> @bitcast_f64_v4f16(double %a) { ; CHECK-LABEL: bitcast_f64_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret %b = bitcast double %a to <4 x half> @@ -338,7 +338,7 @@ define <2 x i32> @bitcast_f64_v2i32(double %a) { ; CHECK-LABEL: bitcast_f64_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret %b = bitcast double %a to <2 x i32> @@ -348,7 +348,7 @@ define <2 x float> @bitcast_f64_v2f32(double %a) { ; CHECK-LABEL: bitcast_f64_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret %b = bitcast double %a to <2 x float> @@ -358,7 +358,7 @@ define <1 x i64> @bitcast_f64_v1i64(double %a) { ; CHECK-LABEL: bitcast_f64_v1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret %b = bitcast double %a to <1 x i64> @@ -368,7 +368,7 @@ define <1 x double> @bitcast_f64_v1f64(double %a) { ; CHECK-LABEL: bitcast_f64_v1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret %b = bitcast double %a to <1 x double> diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll @@ -12,7 +12,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI0_0) ; CHECK-NEXT: addi a1, a1, %lo(.LCPI0_0) -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret @@ -36,7 +36,7 @@ ; LMULMAX1-LABEL: hang_when_merging_stores_after_legalization: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: li a0, 2 -; LMULMAX1-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; LMULMAX1-NEXT: vmv.s.x v0, a0 ; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX1-NEXT: vrgather.vi v12, v8, 0 @@ -71,7 +71,7 @@ define void @buildvec_dominant0_v2f32(<2 x float>* %x) { ; CHECK-LABEL: buildvec_dominant0_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: vse32.v v8, (a0) @@ -86,7 +86,7 @@ define void @buildvec_dominant1_v2f32(<2 x float>* %x) { ; CHECK-LABEL: 
buildvec_dominant1_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vadd.vi v8, v8, 1 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 @@ -99,14 +99,14 @@ define void @buildvec_dominant0_v4f32(<4 x float>* %x) { ; CHECK-LABEL: buildvec_dominant0_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: lui a1, %hi(.LCPI4_0) ; CHECK-NEXT: addi a1, a1, %lo(.LCPI4_0) ; CHECK-NEXT: vlse32.v v8, (a1), zero ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetivli zero, 3, e32, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 3, e32, m1, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 2 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret store <4 x float> , <4 x float>* %x @@ -116,12 +116,12 @@ define void @buildvec_dominant1_v4f32(<4 x float>* %x, float %f) { ; CHECK-LABEL: buildvec_dominant1_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v8, zero ; CHECK-NEXT: vfmv.v.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 1 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v9, (a0) ; CHECK-NEXT: ret %v0 = insertelement <4 x float> poison, float %f, i32 0 @@ -137,12 +137,12 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI6_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI6_0)(a1) -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v8, ft0 ; CHECK-NEXT: vfmv.v.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 1 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v9, (a0) ; CHECK-NEXT: ret %v0 = insertelement <4 x float> poison, float %f, i32 0 @@ -157,11 +157,11 @@ ; RV32-LABEL: buildvec_merge0_v4f32: ; RV32: # %bb.0: ; RV32-NEXT: li a1, 6 -; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; RV32-NEXT: lui a2, %hi(.LCPI7_0) ; RV32-NEXT: flw ft0, %lo(.LCPI7_0)(a2) ; RV32-NEXT: vmv.s.x v0, a1 -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vfmv.v.f v8, fa0 ; RV32-NEXT: vfmerge.vfm v8, v8, ft0, v0 ; RV32-NEXT: vse32.v v8, (a0) @@ -172,9 +172,9 @@ ; RV64-NEXT: lui a1, %hi(.LCPI7_0) ; RV64-NEXT: flw ft0, %lo(.LCPI7_0)(a1) ; RV64-NEXT: li a1, 6 -; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; RV64-NEXT: vmv.s.x v0, a1 -; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64-NEXT: vfmv.v.f v8, fa0 ; RV64-NEXT: vfmerge.vfm v8, v8, ft0, v0 ; RV64-NEXT: vse32.v v8, (a0) @@ -190,7 +190,7 @@ define <4 x half> @splat_c3_v4f16(<4 x half> %v) { ; CHECK-LABEL: splat_c3_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vrgather.vi v9, v8, 3 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -203,7 +203,7 @@ define <4 x half> @splat_idx_v4f16(<4 x half> %v, i64 %idx) { ; CHECK-LABEL: splat_idx_v4f16: ; CHECK: # %bb.0: 
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vrgather.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -216,14 +216,14 @@ define <8 x float> @splat_c5_v8f32(<8 x float> %v) { ; LMULMAX1-LABEL: splat_c5_v8f32: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vrgather.vi v8, v9, 1 ; LMULMAX1-NEXT: vmv.v.v v9, v8 ; LMULMAX1-NEXT: ret ; ; LMULMAX2-LABEL: splat_c5_v8f32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vrgather.vi v10, v8, 5 ; LMULMAX2-NEXT: vmv.v.v v8, v10 ; LMULMAX2-NEXT: ret @@ -243,7 +243,7 @@ ; LMULMAX1-NEXT: mv a1, sp ; LMULMAX1-NEXT: add a0, a1, a0 ; LMULMAX1-NEXT: addi a2, sp, 16 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vse32.v v9, (a2) ; LMULMAX1-NEXT: vse32.v v8, (a1) ; LMULMAX1-NEXT: vlse32.v v8, (a0), zero @@ -253,7 +253,7 @@ ; ; LMULMAX2-LABEL: splat_idx_v8f32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vrgather.vx v10, v8, a0 ; LMULMAX2-NEXT: vmv.v.v v8, v10 ; LMULMAX2-NEXT: ret @@ -269,7 +269,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a1, %hi(.LCPI12_0) ; RV32-NEXT: addi a1, a1, %lo(.LCPI12_0) -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vlse32.v v8, (a1), zero ; RV32-NEXT: li a1, 1024 ; RV32-NEXT: .LBB12_1: # =>This Inner Loop Header: Depth=1 @@ -284,7 +284,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a1, %hi(.LCPI12_0) ; RV64-NEXT: addi a1, a1, %lo(.LCPI12_0) -; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64-NEXT: vlse32.v v8, (a1), zero ; RV64-NEXT: li a1, 0 ; RV64-NEXT: li a2, 1024 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll @@ -7,7 +7,7 @@ define void @fpext_v2f16_v2f32(<2 x half>* %x, <2 x float>* %y) { ; CHECK-LABEL: fpext_v2f16_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfwcvt.f.f.v v9, v8 ; CHECK-NEXT: vse32.v v9, (a1) @@ -21,10 +21,10 @@ define void @fpext_v2f16_v2f64(<2 x half>* %x, <2 x double>* %y) { ; CHECK-LABEL: fpext_v2f16_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfwcvt.f.f.v v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v8, v9 ; CHECK-NEXT: vse64.v v8, (a1) ; CHECK-NEXT: ret @@ -37,7 +37,7 @@ define void @fpext_v8f16_v8f32(<8 x half>* %x, <8 x float>* %y) { ; LMULMAX8-LABEL: fpext_v8f16_v8f32: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX8-NEXT: vle16.v v8, (a0) ; LMULMAX8-NEXT: vfwcvt.f.f.v v10, v8 ; LMULMAX8-NEXT: vse32.v v10, (a1) @@ -45,11 +45,11 @@ ; ; LMULMAX1-LABEL: fpext_v8f16_v8f32: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-NEXT: 
vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-NEXT: vle16.v v8, (a0) -; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v9, v8, 4 -; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; LMULMAX1-NEXT: vfwcvt.f.f.v v10, v9 ; LMULMAX1-NEXT: vfwcvt.f.f.v v9, v8 ; LMULMAX1-NEXT: addi a0, a1, 16 @@ -65,39 +65,39 @@ define void @fpext_v8f16_v8f64(<8 x half>* %x, <8 x double>* %y) { ; LMULMAX8-LABEL: fpext_v8f16_v8f64: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX8-NEXT: vle16.v v8, (a0) ; LMULMAX8-NEXT: vfwcvt.f.f.v v10, v8 -; LMULMAX8-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; LMULMAX8-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; LMULMAX8-NEXT: vfwcvt.f.f.v v12, v10 ; LMULMAX8-NEXT: vse64.v v12, (a1) ; LMULMAX8-NEXT: ret ; ; LMULMAX1-LABEL: fpext_v8f16_v8f64: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-NEXT: vle16.v v8, (a0) -; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v9, v8, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; LMULMAX1-NEXT: vfwcvt.f.f.v v10, v9 -; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; LMULMAX1-NEXT: vfwcvt.f.f.v v9, v10 -; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v10, v8, 4 -; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v11, v10, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; LMULMAX1-NEXT: vfwcvt.f.f.v v12, v11 -; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; LMULMAX1-NEXT: vfwcvt.f.f.v v11, v12 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vfwcvt.f.f.v v12, v10 -; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; LMULMAX1-NEXT: vfwcvt.f.f.v v10, v12 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vfwcvt.f.f.v v12, v8 -; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; LMULMAX1-NEXT: vfwcvt.f.f.v v8, v12 ; LMULMAX1-NEXT: addi a0, a1, 32 ; LMULMAX1-NEXT: vse64.v v10, (a0) @@ -116,7 +116,7 @@ define void @fpround_v2f32_v2f16(<2 x float>* %x, <2 x half>* %y) { ; CHECK-LABEL: fpround_v2f32_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfncvt.f.f.w v9, v8 ; CHECK-NEXT: vse16.v v9, (a1) @@ -130,10 +130,10 @@ define void @fpround_v2f64_v2f16(<2 x double>* %x, <2 x half>* %y) { ; CHECK-LABEL: fpround_v2f64_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfncvt.rod.f.f.w v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; 
CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v8, v9 ; CHECK-NEXT: vse16.v v8, (a1) ; CHECK-NEXT: ret @@ -146,7 +146,7 @@ define void @fpround_v8f32_v8f16(<8 x float>* %x, <8 x half>* %y) { ; LMULMAX8-LABEL: fpround_v8f32_v8f16: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX8-NEXT: vle32.v v8, (a0) ; LMULMAX8-NEXT: vfncvt.f.f.w v10, v8 ; LMULMAX8-NEXT: vse16.v v10, (a1) @@ -155,12 +155,12 @@ ; LMULMAX1-LABEL: fpround_v8f32_v8f16: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: addi a2, a0, 16 -; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; LMULMAX1-NEXT: vle32.v v8, (a0) ; LMULMAX1-NEXT: vle32.v v9, (a2) ; LMULMAX1-NEXT: vfncvt.f.f.w v10, v8 ; LMULMAX1-NEXT: vfncvt.f.f.w v8, v9 -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v10, v8, 4 ; LMULMAX1-NEXT: vse16.v v10, (a1) ; LMULMAX1-NEXT: ret @@ -173,10 +173,10 @@ define void @fpround_v8f64_v8f16(<8 x double>* %x, <8 x half>* %y) { ; LMULMAX8-LABEL: fpround_v8f64_v8f16: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX8-NEXT: vle64.v v8, (a0) ; LMULMAX8-NEXT: vfncvt.rod.f.f.w v12, v8 -; LMULMAX8-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; LMULMAX8-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; LMULMAX8-NEXT: vfncvt.f.f.w v8, v12 ; LMULMAX8-NEXT: vse16.v v8, (a1) ; LMULMAX8-NEXT: ret @@ -184,7 +184,7 @@ ; LMULMAX1-LABEL: fpround_v8f64_v8f16: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: addi a2, a0, 48 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vle64.v v8, (a2) ; LMULMAX1-NEXT: addi a2, a0, 32 ; LMULMAX1-NEXT: vle64.v v9, (a0) @@ -192,25 +192,25 @@ ; LMULMAX1-NEXT: addi a0, a0, 16 ; LMULMAX1-NEXT: vle64.v v11, (a0) ; LMULMAX1-NEXT: vfncvt.rod.f.f.w v12, v9 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vfncvt.f.f.w v9, v12 -; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; LMULMAX1-NEXT: vfncvt.rod.f.f.w v12, v11 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vfncvt.f.f.w v11, v12 -; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v9, v11, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vfncvt.rod.f.f.w v11, v10 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vfncvt.f.f.w v10, v11 -; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v9, v10, 4 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vfncvt.rod.f.f.w v10, v8 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vfncvt.f.f.w v8, v10 -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v9, v8, 6 ; LMULMAX1-NEXT: vse16.v v9, (a1) ; 
LMULMAX1-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll @@ -9,7 +9,7 @@ define <4 x half> @interleave_v2f16(<2 x half> %x, <2 x half> %y) { ; CHECK-LABEL: interleave_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf4, ta, ma ; CHECK-NEXT: vwaddu.vv v10, v8, v9 ; CHECK-NEXT: li a0, -1 ; CHECK-NEXT: vwmaccu.vx v10, a0, v9 @@ -23,7 +23,7 @@ define <4 x float> @interleave_v2f32(<2 x float> %x, <2 x float> %y) { ; CHECK-LABEL: interleave_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, mf2, ta, ma ; CHECK-NEXT: vwaddu.vv v10, v9, v8 ; CHECK-NEXT: li a0, -1 ; CHECK-NEXT: vwmaccu.vx v10, a0, v8 @@ -40,7 +40,7 @@ ; RV32-V128: # %bb.0: ; RV32-V128-NEXT: vmv1r.v v12, v9 ; RV32-V128-NEXT: # kill: def $v8 killed $v8 def $v8m2 -; RV32-V128-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV32-V128-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV32-V128-NEXT: vid.v v10 ; RV32-V128-NEXT: vsrl.vi v14, v10, 1 ; RV32-V128-NEXT: vsetvli zero, zero, e64, m2, ta, mu @@ -67,7 +67,7 @@ ; ; RV32-V512-LABEL: interleave_v2f64: ; RV32-V512: # %bb.0: -; RV32-V512-NEXT: vsetivli zero, 4, e16, mf4, ta, mu +; RV32-V512-NEXT: vsetivli zero, 4, e16, mf4, ta, ma ; RV32-V512-NEXT: vid.v v10 ; RV32-V512-NEXT: vsrl.vi v11, v10, 1 ; RV32-V512-NEXT: vsetvli zero, zero, e64, m1, ta, mu @@ -97,7 +97,7 @@ define <8 x half> @interleave_v4f16(<4 x half> %x, <4 x half> %y) { ; V128-LABEL: interleave_v4f16: ; V128: # %bb.0: -; V128-NEXT: vsetivli zero, 8, e16, mf2, ta, mu +; V128-NEXT: vsetivli zero, 8, e16, mf2, ta, ma ; V128-NEXT: vwaddu.vv v10, v8, v9 ; V128-NEXT: li a0, -1 ; V128-NEXT: vwmaccu.vx v10, a0, v9 @@ -106,7 +106,7 @@ ; ; V512-LABEL: interleave_v4f16: ; V512: # %bb.0: -; V512-NEXT: vsetivli zero, 8, e16, mf4, ta, mu +; V512-NEXT: vsetivli zero, 8, e16, mf4, ta, ma ; V512-NEXT: vwaddu.vv v10, v8, v9 ; V512-NEXT: li a0, -1 ; V512-NEXT: vwmaccu.vx v10, a0, v9 @@ -119,7 +119,7 @@ define <8 x float> @interleave_v4f32(<4 x float> %x, <4 x float> %y) { ; V128-LABEL: interleave_v4f32: ; V128: # %bb.0: -; V128-NEXT: vsetivli zero, 8, e32, m1, ta, mu +; V128-NEXT: vsetivli zero, 8, e32, m1, ta, ma ; V128-NEXT: vwaddu.vv v10, v8, v9 ; V128-NEXT: li a0, -1 ; V128-NEXT: vwmaccu.vx v10, a0, v9 @@ -128,7 +128,7 @@ ; ; V512-LABEL: interleave_v4f32: ; V512: # %bb.0: -; V512-NEXT: vsetivli zero, 8, e32, mf2, ta, mu +; V512-NEXT: vsetivli zero, 8, e32, mf2, ta, ma ; V512-NEXT: vwaddu.vv v10, v8, v9 ; V512-NEXT: li a0, -1 ; V512-NEXT: vwmaccu.vx v10, a0, v9 @@ -142,7 +142,7 @@ define <16 x half> @interleave_v8f16(<8 x half> %x, <8 x half> %y) { ; V128-LABEL: interleave_v8f16: ; V128: # %bb.0: -; V128-NEXT: vsetivli zero, 16, e16, m1, ta, mu +; V128-NEXT: vsetivli zero, 16, e16, m1, ta, ma ; V128-NEXT: vwaddu.vv v10, v9, v8 ; V128-NEXT: li a0, -1 ; V128-NEXT: vwmaccu.vx v10, a0, v8 @@ -151,7 +151,7 @@ ; ; V512-LABEL: interleave_v8f16: ; V512: # %bb.0: -; V512-NEXT: vsetivli zero, 16, e16, mf4, ta, mu +; V512-NEXT: vsetivli zero, 16, e16, mf4, ta, ma ; V512-NEXT: vwaddu.vv v10, v9, v8 ; V512-NEXT: li a0, -1 ; V512-NEXT: vwmaccu.vx v10, a0, v8 @@ -164,7 +164,7 @@ define <16 x float> @interleave_v8f32(<8 x float> %x, <8 x float> %y) { ; V128-LABEL: interleave_v8f32: ; V128: # %bb.0: -; V128-NEXT: 
vsetivli zero, 16, e32, m2, ta, mu +; V128-NEXT: vsetivli zero, 16, e32, m2, ta, ma ; V128-NEXT: vwaddu.vv v12, v8, v10 ; V128-NEXT: li a0, -1 ; V128-NEXT: vwmaccu.vx v12, a0, v10 @@ -173,7 +173,7 @@ ; ; V512-LABEL: interleave_v8f32: ; V512: # %bb.0: -; V512-NEXT: vsetivli zero, 16, e32, mf2, ta, mu +; V512-NEXT: vsetivli zero, 16, e32, mf2, ta, ma ; V512-NEXT: vwaddu.vv v10, v8, v9 ; V512-NEXT: li a0, -1 ; V512-NEXT: vwmaccu.vx v10, a0, v9 @@ -187,7 +187,7 @@ ; V128-LABEL: interleave_v16f16: ; V128: # %bb.0: ; V128-NEXT: li a0, 32 -; V128-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; V128-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; V128-NEXT: vwaddu.vv v12, v8, v10 ; V128-NEXT: li a0, -1 ; V128-NEXT: vwmaccu.vx v12, a0, v10 @@ -197,7 +197,7 @@ ; V512-LABEL: interleave_v16f16: ; V512: # %bb.0: ; V512-NEXT: li a0, 32 -; V512-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; V512-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; V512-NEXT: vwaddu.vv v10, v8, v9 ; V512-NEXT: li a0, -1 ; V512-NEXT: vwmaccu.vx v10, a0, v9 @@ -211,7 +211,7 @@ ; V128-LABEL: interleave_v16f32: ; V128: # %bb.0: ; V128-NEXT: li a0, 32 -; V128-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; V128-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; V128-NEXT: vwaddu.vv v16, v8, v12 ; V128-NEXT: li a0, -1 ; V128-NEXT: vwmaccu.vx v16, a0, v12 @@ -221,7 +221,7 @@ ; V512-LABEL: interleave_v16f32: ; V512: # %bb.0: ; V512-NEXT: li a0, 32 -; V512-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; V512-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; V512-NEXT: vwaddu.vv v10, v8, v9 ; V512-NEXT: li a0, -1 ; V512-NEXT: vwmaccu.vx v10, a0, v9 @@ -235,7 +235,7 @@ ; V128-LABEL: interleave_v32f16: ; V128: # %bb.0: ; V128-NEXT: li a0, 64 -; V128-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; V128-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; V128-NEXT: vwaddu.vv v16, v8, v12 ; V128-NEXT: li a0, -1 ; V128-NEXT: vwmaccu.vx v16, a0, v12 @@ -245,7 +245,7 @@ ; V512-LABEL: interleave_v32f16: ; V512: # %bb.0: ; V512-NEXT: li a0, 64 -; V512-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; V512-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; V512-NEXT: vwaddu.vv v10, v8, v9 ; V512-NEXT: li a0, -1 ; V512-NEXT: vwmaccu.vx v10, a0, v9 @@ -266,7 +266,7 @@ ; RV32-V128-NEXT: lui a0, %hi(.LCPI10_0) ; RV32-V128-NEXT: addi a0, a0, %lo(.LCPI10_0) ; RV32-V128-NEXT: li a1, 32 -; RV32-V128-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; RV32-V128-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; RV32-V128-NEXT: vle32.v v0, (a0) ; RV32-V128-NEXT: vmv8r.v v24, v8 ; RV32-V128-NEXT: addi a0, sp, 16 @@ -282,7 +282,7 @@ ; RV32-V128-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill ; RV32-V128-NEXT: lui a0, 699051 ; RV32-V128-NEXT: addi a0, a0, -1366 -; RV32-V128-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV32-V128-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV32-V128-NEXT: vmv.s.x v0, a0 ; RV32-V128-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; RV32-V128-NEXT: csrr a0, vlenb @@ -292,7 +292,7 @@ ; RV32-V128-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload ; RV32-V128-NEXT: vrgather.vv v8, v16, v24, v0.t ; RV32-V128-NEXT: vmv.v.v v24, v8 -; RV32-V128-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-V128-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV32-V128-NEXT: addi a0, sp, 16 ; RV32-V128-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload ; RV32-V128-NEXT: vwaddu.vv v0, v8, v16 @@ -316,7 +316,7 @@ ; RV64-V128-NEXT: lui a0, %hi(.LCPI10_0) ; RV64-V128-NEXT: addi a0, a0, %lo(.LCPI10_0) ; RV64-V128-NEXT: li a1, 32 -; RV64-V128-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; RV64-V128-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; RV64-V128-NEXT: vle32.v 
v0, (a0) ; RV64-V128-NEXT: vmv8r.v v24, v8 ; RV64-V128-NEXT: addi a0, sp, 16 @@ -332,7 +332,7 @@ ; RV64-V128-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill ; RV64-V128-NEXT: lui a0, 699051 ; RV64-V128-NEXT: addiw a0, a0, -1366 -; RV64-V128-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV64-V128-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV64-V128-NEXT: vmv.s.x v0, a0 ; RV64-V128-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; RV64-V128-NEXT: csrr a0, vlenb @@ -342,7 +342,7 @@ ; RV64-V128-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload ; RV64-V128-NEXT: vrgather.vv v8, v16, v24, v0.t ; RV64-V128-NEXT: vmv.v.v v24, v8 -; RV64-V128-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-V128-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV64-V128-NEXT: addi a0, sp, 16 ; RV64-V128-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload ; RV64-V128-NEXT: vwaddu.vv v0, v8, v16 @@ -359,7 +359,7 @@ ; V512-LABEL: interleave_v32f32: ; V512: # %bb.0: ; V512-NEXT: li a0, 64 -; V512-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; V512-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; V512-NEXT: vwaddu.vv v12, v8, v10 ; V512-NEXT: li a0, -1 ; V512-NEXT: vwmaccu.vx v12, a0, v10 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll @@ -5,7 +5,7 @@ define void @fcmp_oeq_vv_v8f16(<8 x half>* %x, <8 x half>* %y, <8 x i1>* %z) { ; CHECK-LABEL: fcmp_oeq_vv_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vmfeq.vv v8, v8, v9 @@ -21,7 +21,7 @@ define void @fcmp_oeq_vv_v8f16_nonans(<8 x half>* %x, <8 x half>* %y, <8 x i1>* %z) { ; CHECK-LABEL: fcmp_oeq_vv_v8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vmfeq.vv v8, v8, v9 @@ -37,18 +37,18 @@ define void @fcmp_une_vv_v4f32(<4 x float>* %x, <4 x float>* %y, <4 x i1>* %z) { ; CHECK-LABEL: fcmp_une_vv_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vmfne.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a2) ; CHECK-NEXT: ret @@ -62,18 +62,18 @@ define void @fcmp_une_vv_v4f32_nonans(<4 x float>* %x, <4 x float>* %y, <4 x i1>* %z) { ; CHECK-LABEL: fcmp_une_vv_v4f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vmfne.vv v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; 
CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a2) ; CHECK-NEXT: ret @@ -87,18 +87,18 @@ define void @fcmp_ogt_vv_v2f64(<2 x double>* %x, <2 x double>* %y, <2 x i1>* %z) { ; CHECK-LABEL: fcmp_ogt_vv_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vmflt.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a2) ; CHECK-NEXT: ret @@ -112,18 +112,18 @@ define void @fcmp_ogt_vv_v2f64_nonans(<2 x double>* %x, <2 x double>* %y, <2 x i1>* %z) { ; CHECK-LABEL: fcmp_ogt_vv_v2f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vmflt.vv v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a2) ; CHECK-NEXT: ret @@ -137,7 +137,7 @@ define void @fcmp_olt_vv_v16f16(<16 x half>* %x, <16 x half>* %y, <16 x i1>* %z) { ; CHECK-LABEL: fcmp_olt_vv_v16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v10, (a1) ; CHECK-NEXT: vmflt.vv v12, v8, v10 @@ -153,7 +153,7 @@ define void @fcmp_olt_vv_v16f16_nonans(<16 x half>* %x, <16 x half>* %y, <16 x i1>* %z) { ; CHECK-LABEL: fcmp_olt_vv_v16f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v10, (a1) ; CHECK-NEXT: vmflt.vv v12, v8, v10 @@ -169,7 +169,7 @@ define void @fcmp_oge_vv_v8f32(<8 x float>* %x, <8 x float>* %y, <8 x i1>* %z) { ; CHECK-LABEL: fcmp_oge_vv_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v10, (a1) ; CHECK-NEXT: vmfle.vv v12, v10, v8 @@ -185,7 +185,7 @@ define void @fcmp_oge_vv_v8f32_nonans(<8 x float>* %x, <8 
x float>* %y, <8 x i1>* %z) { ; CHECK-LABEL: fcmp_oge_vv_v8f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v10, (a1) ; CHECK-NEXT: vmfle.vv v12, v10, v8 @@ -201,18 +201,18 @@ define void @fcmp_ole_vv_v4f64(<4 x double>* %x, <4 x double>* %y, <4 x i1>* %z) { ; CHECK-LABEL: fcmp_ole_vv_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v10, (a1) ; CHECK-NEXT: vmfle.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a2) ; CHECK-NEXT: ret @@ -226,18 +226,18 @@ define void @fcmp_ole_vv_v4f64_nonans(<4 x double>* %x, <4 x double>* %y, <4 x i1>* %z) { ; CHECK-LABEL: fcmp_ole_vv_v4f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v10, (a1) ; CHECK-NEXT: vmfle.vv v0, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a2) ; CHECK-NEXT: ret @@ -252,7 +252,7 @@ ; CHECK-LABEL: fcmp_ule_vv_v32f16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a3, 32 -; CHECK-NEXT: vsetvli zero, a3, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v12, (a1) ; CHECK-NEXT: vmflt.vv v16, v12, v8 @@ -270,7 +270,7 @@ ; CHECK-LABEL: fcmp_ule_vv_v32f16_nonans: ; CHECK: # %bb.0: ; CHECK-NEXT: li a3, 32 -; CHECK-NEXT: vsetvli zero, a3, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v12, (a1) ; CHECK-NEXT: vmfle.vv v16, v8, v12 @@ -286,7 +286,7 @@ define void @fcmp_uge_vv_v16f32(<16 x float>* %x, <16 x float>* %y, <16 x i1>* %z) { ; CHECK-LABEL: fcmp_uge_vv_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v12, (a1) ; CHECK-NEXT: vmflt.vv v16, v8, v12 @@ -303,7 +303,7 @@ define void @fcmp_uge_vv_v16f32_nonans(<16 x float>* %x, <16 x float>* %y, <16 x i1>* %z) { ; CHECK-LABEL: fcmp_uge_vv_v16f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v12, (a1) ; CHECK-NEXT: vmfle.vv v16, v12, v8 @@ -319,7 
+319,7 @@ define void @fcmp_ult_vv_v8f64(<8 x double>* %x, <8 x double>* %y, <8 x i1>* %z) { ; CHECK-LABEL: fcmp_ult_vv_v8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v12, (a1) ; CHECK-NEXT: vmfle.vv v16, v12, v8 @@ -336,7 +336,7 @@ define void @fcmp_ult_vv_v8f64_nonans(<8 x double>* %x, <8 x double>* %y, <8 x i1>* %z) { ; CHECK-LABEL: fcmp_ult_vv_v8f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v12, (a1) ; CHECK-NEXT: vmflt.vv v16, v8, v12 @@ -353,7 +353,7 @@ ; CHECK-LABEL: fcmp_ugt_vv_v64f16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a3, 64 -; CHECK-NEXT: vsetvli zero, a3, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v16, (a1) ; CHECK-NEXT: vmfle.vv v24, v8, v16 @@ -371,7 +371,7 @@ ; CHECK-LABEL: fcmp_ugt_vv_v64f16_nonans: ; CHECK: # %bb.0: ; CHECK-NEXT: li a3, 64 -; CHECK-NEXT: vsetvli zero, a3, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v16, (a1) ; CHECK-NEXT: vmflt.vv v24, v16, v8 @@ -388,7 +388,7 @@ ; CHECK-LABEL: fcmp_ueq_vv_v32f32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a3, 32 -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v16, (a1) ; CHECK-NEXT: vmflt.vv v24, v8, v16 @@ -407,7 +407,7 @@ ; CHECK-LABEL: fcmp_ueq_vv_v32f32_nonans: ; CHECK: # %bb.0: ; CHECK-NEXT: li a3, 32 -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v16, (a1) ; CHECK-NEXT: vmfeq.vv v24, v8, v16 @@ -423,7 +423,7 @@ define void @fcmp_one_vv_v8f64(<16 x double>* %x, <16 x double>* %y, <16 x i1>* %z) { ; CHECK-LABEL: fcmp_one_vv_v8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v16, (a1) ; CHECK-NEXT: vmflt.vv v24, v8, v16 @@ -441,7 +441,7 @@ define void @fcmp_one_vv_v8f64_nonans(<16 x double>* %x, <16 x double>* %y, <16 x i1>* %z) { ; CHECK-LABEL: fcmp_one_vv_v8f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v16, (a1) ; CHECK-NEXT: vmfne.vv v24, v8, v16 @@ -457,20 +457,20 @@ define void @fcmp_ord_vv_v4f16(<4 x half>* %x, <4 x half>* %y, <4 x i1>* %z) { ; CHECK-LABEL: fcmp_ord_vv_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v8, (a1) ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vmfeq.vv v8, v8, v8 ; CHECK-NEXT: vmfeq.vv v9, v9, v9 ; CHECK-NEXT: vmand.mm v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, 
e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a2) ; CHECK-NEXT: ret @@ -484,20 +484,20 @@ define void @fcmp_uno_vv_v4f16(<2 x half>* %x, <2 x half>* %y, <2 x i1>* %z) { ; CHECK-LABEL: fcmp_uno_vv_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a1) ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vmfne.vv v8, v8, v8 ; CHECK-NEXT: vmfne.vv v9, v9, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a2) ; CHECK-NEXT: ret @@ -511,7 +511,7 @@ define void @fcmp_oeq_vf_v8f16(<8 x half>* %x, half %y, <8 x i1>* %z) { ; CHECK-LABEL: fcmp_oeq_vf_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmfeq.vf v8, v8, fa0 ; CHECK-NEXT: vsm.v v8, (a1) @@ -527,7 +527,7 @@ define void @fcmp_oeq_vf_v8f16_nonans(<8 x half>* %x, half %y, <8 x i1>* %z) { ; CHECK-LABEL: fcmp_oeq_vf_v8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmfeq.vf v8, v8, fa0 ; CHECK-NEXT: vsm.v v8, (a1) @@ -543,17 +543,17 @@ define void @fcmp_une_vf_v4f32(<4 x float>* %x, float %y, <4 x i1>* %z) { ; CHECK-LABEL: fcmp_une_vf_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfne.vf v0, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a1) ; CHECK-NEXT: ret @@ -568,17 +568,17 @@ define void @fcmp_une_vf_v4f32_nonans(<4 x float>* %x, float %y, <4 x i1>* %z) { ; CHECK-LABEL: fcmp_une_vf_v4f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfne.vf v0, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli 
zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a1) ; CHECK-NEXT: ret @@ -593,17 +593,17 @@ define void @fcmp_ogt_vf_v2f64(<2 x double>* %x, double %y, <2 x i1>* %z) { ; CHECK-LABEL: fcmp_ogt_vf_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a1) ; CHECK-NEXT: ret @@ -618,17 +618,17 @@ define void @fcmp_ogt_vf_v2f64_nonans(<2 x double>* %x, double %y, <2 x i1>* %z) { ; CHECK-LABEL: fcmp_ogt_vf_v2f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a1) ; CHECK-NEXT: ret @@ -643,7 +643,7 @@ define void @fcmp_olt_vf_v16f16(<16 x half>* %x, half %y, <16 x i1>* %z) { ; CHECK-LABEL: fcmp_olt_vf_v16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmflt.vf v10, v8, fa0 ; CHECK-NEXT: vsm.v v10, (a1) @@ -659,7 +659,7 @@ define void @fcmp_olt_vf_v16f16_nonans(<16 x half>* %x, half %y, <16 x i1>* %z) { ; CHECK-LABEL: fcmp_olt_vf_v16f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmflt.vf v10, v8, fa0 ; CHECK-NEXT: vsm.v v10, (a1) @@ -675,7 +675,7 @@ define void @fcmp_oge_vf_v8f32(<8 x float>* %x, float %y, <8 x i1>* %z) { ; CHECK-LABEL: fcmp_oge_vf_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfge.vf v10, v8, fa0 ; CHECK-NEXT: vsm.v v10, (a1) @@ -691,7 +691,7 @@ define void @fcmp_oge_vf_v8f32_nonans(<8 x float>* %x, float %y, <8 x i1>* %z) { ; CHECK-LABEL: fcmp_oge_vf_v8f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfge.vf v10, v8, fa0 ; CHECK-NEXT: vsm.v v10, (a1) @@ -707,17 +707,17 @@ define void @fcmp_ole_vf_v4f64(<4 x double>* %x, double %y, <4 x i1>* %z) { ; CHECK-LABEL: fcmp_ole_vf_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, 
ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vmfle.vf v0, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a1) ; CHECK-NEXT: ret @@ -732,17 +732,17 @@ define void @fcmp_ole_vf_v4f64_nonans(<4 x double>* %x, double %y, <4 x i1>* %z) { ; CHECK-LABEL: fcmp_ole_vf_v4f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vmfle.vf v0, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a1) ; CHECK-NEXT: ret @@ -758,7 +758,7 @@ ; CHECK-LABEL: fcmp_ule_vf_v32f16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmfgt.vf v12, v8, fa0 ; CHECK-NEXT: vmnot.m v8, v12 @@ -776,7 +776,7 @@ ; CHECK-LABEL: fcmp_ule_vf_v32f16_nonans: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmfle.vf v12, v8, fa0 ; CHECK-NEXT: vsm.v v12, (a1) @@ -792,7 +792,7 @@ define void @fcmp_uge_vf_v16f32(<16 x float>* %x, float %y, <16 x i1>* %z) { ; CHECK-LABEL: fcmp_uge_vf_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmflt.vf v12, v8, fa0 ; CHECK-NEXT: vmnot.m v8, v12 @@ -809,7 +809,7 @@ define void @fcmp_uge_vf_v16f32_nonans(<16 x float>* %x, float %y, <16 x i1>* %z) { ; CHECK-LABEL: fcmp_uge_vf_v16f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfge.vf v12, v8, fa0 ; CHECK-NEXT: vsm.v v12, (a1) @@ -825,7 +825,7 @@ define void @fcmp_ult_vf_v8f64(<8 x double>* %x, double %y, <8 x i1>* %z) { ; CHECK-LABEL: fcmp_ult_vf_v8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vmfge.vf v12, v8, fa0 ; CHECK-NEXT: vmnot.m v8, v12 @@ -842,7 +842,7 @@ define void @fcmp_ult_vf_v8f64_nonans(<8 x double>* %x, double %y, <8 x i1>* %z) { ; CHECK-LABEL: fcmp_ult_vf_v8f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e64, 
m4, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vmflt.vf v12, v8, fa0 ; CHECK-NEXT: vsm.v v12, (a1) @@ -859,7 +859,7 @@ ; CHECK-LABEL: fcmp_ugt_vf_v64f16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 64 -; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmfle.vf v16, v8, fa0 ; CHECK-NEXT: vmnot.m v8, v16 @@ -877,7 +877,7 @@ ; CHECK-LABEL: fcmp_ugt_vf_v64f16_nonans: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 64 -; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmfgt.vf v16, v8, fa0 ; CHECK-NEXT: vsm.v v16, (a1) @@ -894,7 +894,7 @@ ; CHECK-LABEL: fcmp_ueq_vf_v32f32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmflt.vf v16, v8, fa0 ; CHECK-NEXT: vmfgt.vf v17, v8, fa0 @@ -913,7 +913,7 @@ ; CHECK-LABEL: fcmp_ueq_vf_v32f32_nonans: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfeq.vf v16, v8, fa0 ; CHECK-NEXT: vsm.v v16, (a1) @@ -929,7 +929,7 @@ define void @fcmp_one_vf_v8f64(<16 x double>* %x, double %y, <16 x i1>* %z) { ; CHECK-LABEL: fcmp_one_vf_v8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vmflt.vf v16, v8, fa0 ; CHECK-NEXT: vmfgt.vf v17, v8, fa0 @@ -947,7 +947,7 @@ define void @fcmp_one_vf_v8f64_nonans(<16 x double>* %x, double %y, <16 x i1>* %z) { ; CHECK-LABEL: fcmp_one_vf_v8f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vmfne.vf v16, v8, fa0 ; CHECK-NEXT: vsm.v v16, (a1) @@ -963,20 +963,20 @@ define void @fcmp_ord_vf_v4f16(<4 x half>* %x, half %y, <4 x i1>* %z) { ; CHECK-LABEL: fcmp_ord_vf_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vmfeq.vf v9, v9, fa0 ; CHECK-NEXT: vmfeq.vv v8, v8, v8 ; CHECK-NEXT: vmand.mm v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a1) ; CHECK-NEXT: ret @@ -991,20 +991,20 @@ define void @fcmp_uno_vf_v4f16(<2 x half>* %x, half %y, <2 x i1>* %z) { ; CHECK-LABEL: fcmp_uno_vf_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vmfne.vf v9, v9, fa0 ; CHECK-NEXT: vmfne.vv v8, v8, v8 ; CHECK-NEXT: vmor.mm v0, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; CHECK-NEXT: 
vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a1) ; CHECK-NEXT: ret @@ -1019,7 +1019,7 @@ define void @fcmp_oeq_fv_v8f16(<8 x half>* %x, half %y, <8 x i1>* %z) { ; CHECK-LABEL: fcmp_oeq_fv_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmfeq.vf v8, v8, fa0 ; CHECK-NEXT: vsm.v v8, (a1) @@ -1035,7 +1035,7 @@ define void @fcmp_oeq_fv_v8f16_nonans(<8 x half>* %x, half %y, <8 x i1>* %z) { ; CHECK-LABEL: fcmp_oeq_fv_v8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmfeq.vf v8, v8, fa0 ; CHECK-NEXT: vsm.v v8, (a1) @@ -1051,17 +1051,17 @@ define void @fcmp_une_fv_v4f32(<4 x float>* %x, float %y, <4 x i1>* %z) { ; CHECK-LABEL: fcmp_une_fv_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfne.vf v0, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a1) ; CHECK-NEXT: ret @@ -1076,17 +1076,17 @@ define void @fcmp_une_fv_v4f32_nonans(<4 x float>* %x, float %y, <4 x i1>* %z) { ; CHECK-LABEL: fcmp_une_fv_v4f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfne.vf v0, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a1) ; CHECK-NEXT: ret @@ -1101,17 +1101,17 @@ define void @fcmp_ogt_fv_v2f64(<2 x double>* %x, double %y, <2 x i1>* %z) { ; CHECK-LABEL: fcmp_ogt_fv_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vmflt.vf v0, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 
8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a1) ; CHECK-NEXT: ret @@ -1126,17 +1126,17 @@ define void @fcmp_ogt_fv_v2f64_nonans(<2 x double>* %x, double %y, <2 x i1>* %z) { ; CHECK-LABEL: fcmp_ogt_fv_v2f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vmflt.vf v0, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a1) ; CHECK-NEXT: ret @@ -1151,7 +1151,7 @@ define void @fcmp_olt_fv_v16f16(<16 x half>* %x, half %y, <16 x i1>* %z) { ; CHECK-LABEL: fcmp_olt_fv_v16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmfgt.vf v10, v8, fa0 ; CHECK-NEXT: vsm.v v10, (a1) @@ -1167,7 +1167,7 @@ define void @fcmp_olt_fv_v16f16_nonans(<16 x half>* %x, half %y, <16 x i1>* %z) { ; CHECK-LABEL: fcmp_olt_fv_v16f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmfgt.vf v10, v8, fa0 ; CHECK-NEXT: vsm.v v10, (a1) @@ -1183,7 +1183,7 @@ define void @fcmp_oge_fv_v8f32(<8 x float>* %x, float %y, <8 x i1>* %z) { ; CHECK-LABEL: fcmp_oge_fv_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfle.vf v10, v8, fa0 ; CHECK-NEXT: vsm.v v10, (a1) @@ -1199,7 +1199,7 @@ define void @fcmp_oge_fv_v8f32_nonans(<8 x float>* %x, float %y, <8 x i1>* %z) { ; CHECK-LABEL: fcmp_oge_fv_v8f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfle.vf v10, v8, fa0 ; CHECK-NEXT: vsm.v v10, (a1) @@ -1215,17 +1215,17 @@ define void @fcmp_ole_fv_v4f64(<4 x double>* %x, double %y, <4 x i1>* %z) { ; CHECK-LABEL: fcmp_ole_fv_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vmfge.vf v0, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; 
CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a1) ; CHECK-NEXT: ret @@ -1240,17 +1240,17 @@ define void @fcmp_ole_fv_v4f64_nonans(<4 x double>* %x, double %y, <4 x i1>* %z) { ; CHECK-LABEL: fcmp_ole_fv_v4f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vmfge.vf v0, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a1) ; CHECK-NEXT: ret @@ -1266,7 +1266,7 @@ ; CHECK-LABEL: fcmp_ule_fv_v32f16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmflt.vf v12, v8, fa0 ; CHECK-NEXT: vmnot.m v8, v12 @@ -1284,7 +1284,7 @@ ; CHECK-LABEL: fcmp_ule_fv_v32f16_nonans: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmfge.vf v12, v8, fa0 ; CHECK-NEXT: vsm.v v12, (a1) @@ -1300,7 +1300,7 @@ define void @fcmp_uge_fv_v16f32(<16 x float>* %x, float %y, <16 x i1>* %z) { ; CHECK-LABEL: fcmp_uge_fv_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfgt.vf v12, v8, fa0 ; CHECK-NEXT: vmnot.m v8, v12 @@ -1317,7 +1317,7 @@ define void @fcmp_uge_fv_v16f32_nonans(<16 x float>* %x, float %y, <16 x i1>* %z) { ; CHECK-LABEL: fcmp_uge_fv_v16f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfle.vf v12, v8, fa0 ; CHECK-NEXT: vsm.v v12, (a1) @@ -1333,7 +1333,7 @@ define void @fcmp_ult_fv_v8f64(<8 x double>* %x, double %y, <8 x i1>* %z) { ; CHECK-LABEL: fcmp_ult_fv_v8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vmfle.vf v12, v8, fa0 ; CHECK-NEXT: vmnot.m v8, v12 @@ -1350,7 +1350,7 @@ define void @fcmp_ult_fv_v8f64_nonans(<8 x double>* %x, double %y, <8 x i1>* %z) { ; CHECK-LABEL: fcmp_ult_fv_v8f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vmfgt.vf v12, v8, fa0 ; CHECK-NEXT: vsm.v v12, (a1) @@ -1367,7 +1367,7 @@ ; CHECK-LABEL: fcmp_ugt_fv_v64f16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 64 -; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmfge.vf v16, v8, fa0 ; CHECK-NEXT: vmnot.m v8, v16 @@ -1385,7 +1385,7 @@ ; CHECK-LABEL: fcmp_ugt_fv_v64f16_nonans: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 64 -; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, 
a2, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmflt.vf v16, v8, fa0 ; CHECK-NEXT: vsm.v v16, (a1) @@ -1402,7 +1402,7 @@ ; CHECK-LABEL: fcmp_ueq_fv_v32f32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfgt.vf v16, v8, fa0 ; CHECK-NEXT: vmflt.vf v17, v8, fa0 @@ -1421,7 +1421,7 @@ ; CHECK-LABEL: fcmp_ueq_fv_v32f32_nonans: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfeq.vf v16, v8, fa0 ; CHECK-NEXT: vsm.v v16, (a1) @@ -1437,7 +1437,7 @@ define void @fcmp_one_fv_v8f64(<16 x double>* %x, double %y, <16 x i1>* %z) { ; CHECK-LABEL: fcmp_one_fv_v8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vmfgt.vf v16, v8, fa0 ; CHECK-NEXT: vmflt.vf v17, v8, fa0 @@ -1455,7 +1455,7 @@ define void @fcmp_one_fv_v8f64_nonans(<16 x double>* %x, double %y, <16 x i1>* %z) { ; CHECK-LABEL: fcmp_one_fv_v8f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vmfne.vf v16, v8, fa0 ; CHECK-NEXT: vsm.v v16, (a1) @@ -1471,20 +1471,20 @@ define void @fcmp_ord_fv_v4f16(<4 x half>* %x, half %y, <4 x i1>* %z) { ; CHECK-LABEL: fcmp_ord_fv_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vmfeq.vf v9, v9, fa0 ; CHECK-NEXT: vmfeq.vv v8, v8, v8 ; CHECK-NEXT: vmand.mm v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a1) ; CHECK-NEXT: ret @@ -1499,20 +1499,20 @@ define void @fcmp_uno_fv_v4f16(<2 x half>* %x, half %y, <2 x i1>* %z) { ; CHECK-LABEL: fcmp_uno_fv_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vmfne.vf v9, v9, fa0 ; CHECK-NEXT: vmfne.vv v8, v8, v8 ; CHECK-NEXT: vmor.mm v0, v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a1) ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll 
b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll @@ -6,9 +6,9 @@ ; CHECK-LABEL: shuffle_v4f16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 11 -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.s.x v0, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %s = shufflevector <4 x half> %x, <4 x half> %y, <4 x i32> @@ -19,9 +19,9 @@ ; CHECK-LABEL: shuffle_v8f32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 236 -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.s.x v0, a0 -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %s = shufflevector <8 x float> %x, <8 x float> %y, <8 x i32> @@ -34,9 +34,9 @@ ; RV32-NEXT: li a0, 9 ; RV32-NEXT: lui a1, %hi(.LCPI2_0) ; RV32-NEXT: fld ft0, %lo(.LCPI2_0)(a1) -; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; RV32-NEXT: vmv.s.x v0, a0 -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vfmerge.vfm v8, v8, ft0, v0 ; RV32-NEXT: ret ; @@ -45,9 +45,9 @@ ; RV64-NEXT: lui a0, %hi(.LCPI2_0) ; RV64-NEXT: fld ft0, %lo(.LCPI2_0)(a0) ; RV64-NEXT: li a0, 9 -; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; RV64-NEXT: vmv.s.x v0, a0 -; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-NEXT: vfmerge.vfm v8, v8, ft0, v0 ; RV64-NEXT: ret %s = shufflevector <4 x double> , <4 x double> %x, <4 x i32> @@ -60,9 +60,9 @@ ; RV32-NEXT: li a0, 6 ; RV32-NEXT: lui a1, %hi(.LCPI3_0) ; RV32-NEXT: fld ft0, %lo(.LCPI3_0)(a1) -; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; RV32-NEXT: vmv.s.x v0, a0 -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vfmerge.vfm v8, v8, ft0, v0 ; RV32-NEXT: ret ; @@ -71,9 +71,9 @@ ; RV64-NEXT: lui a0, %hi(.LCPI3_0) ; RV64-NEXT: fld ft0, %lo(.LCPI3_0)(a0) ; RV64-NEXT: li a0, 6 -; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; RV64-NEXT: vmv.s.x v0, a0 -; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-NEXT: vfmerge.vfm v8, v8, ft0, v0 ; RV64-NEXT: ret %s = shufflevector <4 x double> %x, <4 x double> , <4 x i32> @@ -85,7 +85,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, %hi(.LCPI4_0) ; RV32-NEXT: addi a0, a0, %lo(.LCPI4_0) -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vle16.v v12, (a0) ; RV32-NEXT: vrgatherei16.vv v10, v8, v12 ; RV32-NEXT: vmv.v.v v8, v10 @@ -95,7 +95,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, %hi(.LCPI4_0) ; RV64-NEXT: addi a0, a0, %lo(.LCPI4_0) -; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-NEXT: vle64.v v12, (a0) ; RV64-NEXT: vrgather.vv v10, v8, v12 ; RV64-NEXT: vmv.v.v v8, v10 @@ -109,7 +109,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, %hi(.LCPI5_0) ; RV32-NEXT: addi a0, a0, %lo(.LCPI5_0) -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, 
e64, m2, ta, ma ; RV32-NEXT: vle16.v v12, (a0) ; RV32-NEXT: vrgatherei16.vv v10, v8, v12 ; RV32-NEXT: vmv.v.v v8, v10 @@ -119,7 +119,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, %hi(.LCPI5_0) ; RV64-NEXT: addi a0, a0, %lo(.LCPI5_0) -; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-NEXT: vle64.v v12, (a0) ; RV64-NEXT: vrgather.vv v10, v8, v12 ; RV64-NEXT: vmv.v.v v8, v10 @@ -162,9 +162,9 @@ ; RV32-LABEL: vrgather_shuffle_xv_v4f64: ; RV32: # %bb.0: ; RV32-NEXT: li a0, 12 -; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; RV32-NEXT: vmv.s.x v0, a0 -; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV32-NEXT: lui a0, %hi(.LCPI7_0) ; RV32-NEXT: addi a0, a0, %lo(.LCPI7_0) ; RV32-NEXT: vlse64.v v10, (a0), zero @@ -178,7 +178,7 @@ ; RV64-LABEL: vrgather_shuffle_xv_v4f64: ; RV64: # %bb.0: ; RV64-NEXT: li a0, 12 -; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; RV64-NEXT: vmv.s.x v0, a0 ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: lui a0, %hi(.LCPI7_0) @@ -196,7 +196,7 @@ define <4 x double> @vrgather_shuffle_vx_v4f64(<4 x double> %x) { ; RV32-LABEL: vrgather_shuffle_vx_v4f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV32-NEXT: vid.v v12 ; RV32-NEXT: li a0, 3 ; RV32-NEXT: lui a1, %hi(.LCPI8_0) @@ -229,7 +229,7 @@ define <4 x half> @slidedown_v4f16(<4 x half> %x) { ; CHECK-LABEL: slidedown_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 1 ; CHECK-NEXT: ret %s = shufflevector <4 x half> %x, <4 x half> poison, <4 x i32> @@ -239,7 +239,7 @@ define <8 x float> @slidedown_v8f32(<8 x float> %x) { ; CHECK-LABEL: slidedown_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 3 ; CHECK-NEXT: ret %s = shufflevector <8 x float> %x, <8 x float> poison, <8 x i32> @@ -249,7 +249,7 @@ define <4 x half> @slideup_v4f16(<4 x half> %x) { ; CHECK-LABEL: slideup_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 1 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -260,7 +260,7 @@ define <8 x float> @slideup_v8f32(<8 x float> %x) { ; CHECK-LABEL: slideup_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; CHECK-NEXT: vslideup.vi v10, v8, 3 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -271,9 +271,9 @@ define <8 x float> @splice_unary(<8 x float> %x) { ; CHECK-LABEL: splice_unary: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 7, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 7, e32, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v10, v8, 1 -; CHECK-NEXT: vsetivli zero, 8, e32, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; CHECK-NEXT: vslideup.vi v10, v8, 7 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -284,9 +284,9 @@ define <8 x double> @splice_unary2(<8 x double> %x) { ; CHECK-LABEL: splice_unary2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m4, ta, ma ; CHECK-NEXT: vslidedown.vi v12, v8, 6 -; CHECK-NEXT: vsetivli zero, 8, e64, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 
8, e64, m4, tu, ma ; CHECK-NEXT: vslideup.vi v12, v8, 2 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -297,9 +297,9 @@ define <8 x float> @splice_binary(<8 x float> %x, <8 x float> %y) { ; CHECK-LABEL: splice_binary: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 6, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 6, e32, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 -; CHECK-NEXT: vsetivli zero, 8, e32, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 6 ; CHECK-NEXT: ret %s = shufflevector <8 x float> %x, <8 x float> %y, <8 x i32> @@ -309,9 +309,9 @@ define <8 x double> @splice_binary2(<8 x double> %x, <8 x double> %y) { ; CHECK-LABEL: splice_binary2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 3, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 3, e64, m4, ta, ma ; CHECK-NEXT: vslidedown.vi v12, v12, 5 -; CHECK-NEXT: vsetivli zero, 8, e64, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, tu, ma ; CHECK-NEXT: vslideup.vi v12, v8, 3 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat.ll @@ -7,7 +7,7 @@ define void @splat_v8f16(<8 x half>* %x, half %y) { ; CHECK-LABEL: splat_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vfmv.v.f v8, fa0 ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret @@ -20,7 +20,7 @@ define void @splat_v4f32(<4 x float>* %x, float %y) { ; CHECK-LABEL: splat_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vfmv.v.f v8, fa0 ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret @@ -33,7 +33,7 @@ define void @splat_v2f64(<2 x double>* %x, double %y) { ; CHECK-LABEL: splat_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vfmv.v.f v8, fa0 ; CHECK-NEXT: vse64.v v8, (a0) ; CHECK-NEXT: ret @@ -46,14 +46,14 @@ define void @splat_16f16(<16 x half>* %x, half %y) { ; LMULMAX2-LABEL: splat_16f16: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-NEXT: vfmv.v.f v8, fa0 ; LMULMAX2-NEXT: vse16.v v8, (a0) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: splat_16f16: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-NEXT: vfmv.v.f v8, fa0 ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vse16.v v8, (a1) @@ -68,14 +68,14 @@ define void @splat_v8f32(<8 x float>* %x, float %y) { ; LMULMAX2-LABEL: splat_v8f32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vfmv.v.f v8, fa0 ; LMULMAX2-NEXT: vse32.v v8, (a0) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: splat_v8f32: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vfmv.v.f v8, fa0 ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vse32.v v8, (a1) @@ -90,14 +90,14 @@ define void @splat_v4f64(<4 x double>* %x, double %y) { ; LMULMAX2-LABEL: splat_v4f64: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 4, 
e64, m2, ta, ma ; LMULMAX2-NEXT: vfmv.v.f v8, fa0 ; LMULMAX2-NEXT: vse64.v v8, (a0) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: splat_v4f64: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vfmv.v.f v8, fa0 ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vse64.v v8, (a1) @@ -112,7 +112,7 @@ define void @splat_zero_v8f16(<8 x half>* %x) { ; CHECK-LABEL: splat_zero_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret @@ -125,7 +125,7 @@ define void @splat_zero_v4f32(<4 x float>* %x) { ; CHECK-LABEL: splat_zero_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret @@ -138,7 +138,7 @@ define void @splat_zero_v2f64(<2 x double>* %x) { ; CHECK-LABEL: splat_zero_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vse64.v v8, (a0) ; CHECK-NEXT: ret @@ -151,14 +151,14 @@ define void @splat_zero_16f16(<16 x half>* %x) { ; LMULMAX2-LABEL: splat_zero_16f16: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-NEXT: vmv.v.i v8, 0 ; LMULMAX2-NEXT: vse16.v v8, (a0) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: splat_zero_16f16: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-NEXT: vmv.v.i v8, 0 ; LMULMAX1-NEXT: vse16.v v8, (a0) ; LMULMAX1-NEXT: addi a0, a0, 16 @@ -173,14 +173,14 @@ define void @splat_zero_v8f32(<8 x float>* %x) { ; LMULMAX2-LABEL: splat_zero_v8f32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vmv.v.i v8, 0 ; LMULMAX2-NEXT: vse32.v v8, (a0) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: splat_zero_v8f32: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vmv.v.i v8, 0 ; LMULMAX1-NEXT: vse32.v v8, (a0) ; LMULMAX1-NEXT: addi a0, a0, 16 @@ -195,14 +195,14 @@ define void @splat_zero_v4f64(<4 x double>* %x) { ; LMULMAX2-LABEL: splat_zero_v4f64: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-NEXT: vmv.v.i v8, 0 ; LMULMAX2-NEXT: vse64.v v8, (a0) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: splat_zero_v4f64: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vmv.v.i v8, 0 ; LMULMAX1-NEXT: vse64.v v8, (a0) ; LMULMAX1-NEXT: addi a0, a0, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll @@ -8,7 +8,7 @@ ; CHECK-LABEL: gather_const_v8f16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi a1, a0, 10 -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vlse16.v v8, (a1), zero ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret @@ -24,7 +24,7 @@ ; 
CHECK-LABEL: gather_const_v4f32: ; CHECK: # %bb.0: ; CHECK-NEXT: addi a1, a0, 8 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vlse32.v v8, (a1), zero ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret @@ -39,7 +39,7 @@ define void @gather_const_v2f64(<2 x double>* %x) { ; CHECK-LABEL: gather_const_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v8, (a0), zero ; CHECK-NEXT: vse64.v v8, (a0) ; CHECK-NEXT: ret @@ -56,7 +56,7 @@ ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: li a1, 64 ; LMULMAX8-NEXT: addi a2, a0, 94 -; LMULMAX8-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; LMULMAX8-NEXT: vlse16.v v8, (a2), zero ; LMULMAX8-NEXT: vse16.v v8, (a0) ; LMULMAX8-NEXT: ret @@ -68,7 +68,7 @@ ; LMULMAX1-NEXT: addi a3, a0, 48 ; LMULMAX1-NEXT: addi a4, a0, 32 ; LMULMAX1-NEXT: addi a5, a0, 94 -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-NEXT: vlse16.v v8, (a5), zero ; LMULMAX1-NEXT: addi a5, a0, 64 ; LMULMAX1-NEXT: addi a6, a0, 112 @@ -95,7 +95,7 @@ ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: li a1, 32 ; LMULMAX8-NEXT: addi a2, a0, 68 -; LMULMAX8-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; LMULMAX8-NEXT: vlse32.v v8, (a2), zero ; LMULMAX8-NEXT: vse32.v v8, (a0) ; LMULMAX8-NEXT: ret @@ -107,7 +107,7 @@ ; LMULMAX1-NEXT: addi a3, a0, 48 ; LMULMAX1-NEXT: addi a4, a0, 32 ; LMULMAX1-NEXT: addi a5, a0, 68 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vlse32.v v8, (a5), zero ; LMULMAX1-NEXT: addi a5, a0, 80 ; LMULMAX1-NEXT: addi a6, a0, 112 @@ -133,7 +133,7 @@ ; LMULMAX8-LABEL: gather_const_v16f64: ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: addi a1, a0, 80 -; LMULMAX8-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; LMULMAX8-NEXT: vlse64.v v8, (a1), zero ; LMULMAX8-NEXT: vse64.v v8, (a0) ; LMULMAX8-NEXT: ret @@ -144,7 +144,7 @@ ; LMULMAX1-NEXT: addi a2, a0, 16 ; LMULMAX1-NEXT: addi a3, a0, 48 ; LMULMAX1-NEXT: addi a4, a0, 32 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vlse64.v v8, (a1), zero ; LMULMAX1-NEXT: addi a5, a0, 64 ; LMULMAX1-NEXT: addi a6, a0, 112 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll @@ -7,7 +7,7 @@ define void @fadd_v8f16(<8 x half>* %x, <8 x half>* %y) { ; CHECK-LABEL: fadd_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vfadd.vv v8, v8, v9 @@ -23,7 +23,7 @@ define void @fadd_v4f32(<4 x float>* %x, <4 x float>* %y) { ; CHECK-LABEL: fadd_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vfadd.vv v8, v8, v9 @@ -39,7 +39,7 @@ define void @fadd_v2f64(<2 x double>* %x, <2 x double>* %y) { ; CHECK-LABEL: fadd_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; 
CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vfadd.vv v8, v8, v9 @@ -55,7 +55,7 @@ define void @fsub_v8f16(<8 x half>* %x, <8 x half>* %y) { ; CHECK-LABEL: fsub_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vfsub.vv v8, v8, v9 @@ -71,7 +71,7 @@ define void @fsub_v4f32(<4 x float>* %x, <4 x float>* %y) { ; CHECK-LABEL: fsub_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vfsub.vv v8, v8, v9 @@ -87,7 +87,7 @@ define void @fsub_v2f64(<2 x double>* %x, <2 x double>* %y) { ; CHECK-LABEL: fsub_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vfsub.vv v8, v8, v9 @@ -103,7 +103,7 @@ define void @fmul_v8f16(<8 x half>* %x, <8 x half>* %y) { ; CHECK-LABEL: fmul_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vfmul.vv v8, v8, v9 @@ -119,7 +119,7 @@ define void @fmul_v4f32(<4 x float>* %x, <4 x float>* %y) { ; CHECK-LABEL: fmul_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vfmul.vv v8, v8, v9 @@ -135,7 +135,7 @@ define void @fmul_v2f64(<2 x double>* %x, <2 x double>* %y) { ; CHECK-LABEL: fmul_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vfmul.vv v8, v8, v9 @@ -151,7 +151,7 @@ define void @fdiv_v8f16(<8 x half>* %x, <8 x half>* %y) { ; CHECK-LABEL: fdiv_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vfdiv.vv v8, v8, v9 @@ -167,7 +167,7 @@ define void @fdiv_v4f32(<4 x float>* %x, <4 x float>* %y) { ; CHECK-LABEL: fdiv_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vfdiv.vv v8, v8, v9 @@ -183,7 +183,7 @@ define void @fdiv_v2f64(<2 x double>* %x, <2 x double>* %y) { ; CHECK-LABEL: fdiv_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vfdiv.vv v8, v8, v9 @@ -199,7 +199,7 @@ define void @fneg_v8f16(<8 x half>* %x) { ; CHECK-LABEL: fneg_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: vse16.v v8, (a0) @@ -213,7 +213,7 @@ define void @fneg_v4f32(<4 x float>* %x) { ; CHECK-LABEL: fneg_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: vse32.v v8, (a0) @@ 
-227,7 +227,7 @@ define void @fneg_v2f64(<2 x double>* %x) { ; CHECK-LABEL: fneg_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: vse64.v v8, (a0) @@ -241,7 +241,7 @@ define void @fabs_v8f16(<8 x half>* %x) { ; CHECK-LABEL: fabs_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: vse16.v v8, (a0) @@ -256,7 +256,7 @@ define void @fabs_v4f32(<4 x float>* %x) { ; CHECK-LABEL: fabs_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: vse32.v v8, (a0) @@ -271,7 +271,7 @@ define void @fabs_v2f64(<2 x double>* %x) { ; CHECK-LABEL: fabs_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: vse64.v v8, (a0) @@ -286,7 +286,7 @@ define void @copysign_v8f16(<8 x half>* %x, <8 x half>* %y) { ; CHECK-LABEL: copysign_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 @@ -303,7 +303,7 @@ define void @copysign_v4f32(<4 x float>* %x, <4 x float>* %y) { ; CHECK-LABEL: copysign_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 @@ -320,7 +320,7 @@ define void @copysign_v2f64(<2 x double>* %x, <2 x double>* %y) { ; CHECK-LABEL: copysign_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 @@ -337,7 +337,7 @@ define void @copysign_vf_v8f16(<8 x half>* %x, half %y) { ; CHECK-LABEL: copysign_vf_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: vse16.v v8, (a0) @@ -353,7 +353,7 @@ define void @copysign_vf_v4f32(<4 x float>* %x, float %y) { ; CHECK-LABEL: copysign_vf_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: vse32.v v8, (a0) @@ -369,7 +369,7 @@ define void @copysign_vf_v2f64(<2 x double>* %x, double %y) { ; CHECK-LABEL: copysign_vf_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: vse64.v v8, (a0) @@ -385,7 +385,7 @@ define void @copysign_neg_v8f16(<8 x half>* %x, <8 x half>* %y) { ; CHECK-LABEL: copysign_neg_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 @@ -402,7 +402,7 @@ define void @copysign_neg_v4f32(<4 x float>* 
%x, <4 x float>* %y) { ; CHECK-LABEL: copysign_neg_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 @@ -419,7 +419,7 @@ define void @copysign_neg_v2f64(<2 x double>* %x, <2 x double>* %y) { ; CHECK-LABEL: copysign_neg_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 @@ -436,7 +436,7 @@ define void @copysign_neg_trunc_v4f16_v4f32(<4 x half>* %x, <4 x float>* %y) { ; CHECK-LABEL: copysign_neg_trunc_v4f16_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vfncvt.f.f.w v10, v8 @@ -456,11 +456,11 @@ define void @copysign_neg_ext_v2f64_v2f32(<2 x double>* %x, <2 x float>* %y) { ; CHECK-LABEL: copysign_neg_ext_v2f64_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: vle64.v v9, (a0) ; CHECK-NEXT: vfwcvt.f.f.v v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v9, v10 ; CHECK-NEXT: vse64.v v8, (a0) ; CHECK-NEXT: ret @@ -476,7 +476,7 @@ define void @sqrt_v8f16(<8 x half>* %x) { ; CHECK-LABEL: sqrt_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: vse16.v v8, (a0) @@ -491,7 +491,7 @@ define void @sqrt_v4f32(<4 x float>* %x) { ; CHECK-LABEL: sqrt_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: vse32.v v8, (a0) @@ -506,7 +506,7 @@ define void @sqrt_v2f64(<2 x double>* %x) { ; CHECK-LABEL: sqrt_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: vse64.v v8, (a0) @@ -521,7 +521,7 @@ define void @fma_v8f16(<8 x half>* %x, <8 x half>* %y, <8 x half>* %z) { ; CHECK-LABEL: fma_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vle16.v v10, (a2) @@ -540,7 +540,7 @@ define void @fma_v4f32(<4 x float>* %x, <4 x float>* %y, <4 x float>* %z) { ; CHECK-LABEL: fma_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vle32.v v10, (a2) @@ -559,7 +559,7 @@ define void @fma_v2f64(<2 x double>* %x, <2 x double>* %y, <2 x double>* %z) { ; CHECK-LABEL: fma_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vle64.v v10, (a2) @@ -578,7 +578,7 @@ define void @fmsub_v8f16(<8 x half>* %x, <8 x half>* %y, <8 x half>* %z) { ; CHECK-LABEL: fmsub_v8f16: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vle16.v v10, (a2) @@ -597,7 +597,7 @@ define void @fnmsub_v4f32(<4 x float>* %x, <4 x float>* %y, <4 x float>* %z) { ; CHECK-LABEL: fnmsub_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vle32.v v10, (a2) @@ -616,7 +616,7 @@ define void @fnmadd_v2f64(<2 x double>* %x, <2 x double>* %y, <2 x double>* %z) { ; CHECK-LABEL: fnmadd_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vle64.v v10, (a2) @@ -636,7 +636,7 @@ define void @fadd_v16f16(<16 x half>* %x, <16 x half>* %y) { ; LMULMAX2-LABEL: fadd_v16f16: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-NEXT: vle16.v v8, (a0) ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vfadd.vv v8, v8, v10 @@ -645,7 +645,7 @@ ; ; LMULMAX1-RV32-LABEL: fadd_v16f16: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle16.v v9, (a2) @@ -660,7 +660,7 @@ ; ; LMULMAX1-RV64-LABEL: fadd_v16f16: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle16.v v9, (a2) @@ -682,7 +682,7 @@ define void @fadd_v8f32(<8 x float>* %x, <8 x float>* %y) { ; LMULMAX2-LABEL: fadd_v8f32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v8, (a0) ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vfadd.vv v8, v8, v10 @@ -691,7 +691,7 @@ ; ; LMULMAX1-RV32-LABEL: fadd_v8f32: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle32.v v9, (a2) @@ -706,7 +706,7 @@ ; ; LMULMAX1-RV64-LABEL: fadd_v8f32: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle32.v v9, (a2) @@ -728,7 +728,7 @@ define void @fadd_v4f64(<4 x double>* %x, <4 x double>* %y) { ; LMULMAX2-LABEL: fadd_v4f64: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-NEXT: vle64.v v8, (a0) ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vfadd.vv v8, v8, v10 @@ -737,7 +737,7 @@ ; ; LMULMAX1-RV32-LABEL: fadd_v4f64: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle64.v v9, (a2) @@ -752,7 +752,7 @@ ; ; LMULMAX1-RV64-LABEL: fadd_v4f64: ; LMULMAX1-RV64: # %bb.0: -; 
LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle64.v v9, (a2) @@ -774,7 +774,7 @@ define void @fsub_v16f16(<16 x half>* %x, <16 x half>* %y) { ; LMULMAX2-LABEL: fsub_v16f16: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-NEXT: vle16.v v8, (a0) ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vfsub.vv v8, v8, v10 @@ -783,7 +783,7 @@ ; ; LMULMAX1-RV32-LABEL: fsub_v16f16: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle16.v v9, (a2) @@ -798,7 +798,7 @@ ; ; LMULMAX1-RV64-LABEL: fsub_v16f16: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle16.v v9, (a2) @@ -820,7 +820,7 @@ define void @fsub_v8f32(<8 x float>* %x, <8 x float>* %y) { ; LMULMAX2-LABEL: fsub_v8f32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v8, (a0) ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vfsub.vv v8, v8, v10 @@ -829,7 +829,7 @@ ; ; LMULMAX1-RV32-LABEL: fsub_v8f32: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle32.v v9, (a2) @@ -844,7 +844,7 @@ ; ; LMULMAX1-RV64-LABEL: fsub_v8f32: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle32.v v9, (a2) @@ -866,7 +866,7 @@ define void @fsub_v4f64(<4 x double>* %x, <4 x double>* %y) { ; LMULMAX2-LABEL: fsub_v4f64: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-NEXT: vle64.v v8, (a0) ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vfsub.vv v8, v8, v10 @@ -875,7 +875,7 @@ ; ; LMULMAX1-RV32-LABEL: fsub_v4f64: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle64.v v9, (a2) @@ -890,7 +890,7 @@ ; ; LMULMAX1-RV64-LABEL: fsub_v4f64: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle64.v v9, (a2) @@ -912,7 +912,7 @@ define void @fmul_v16f16(<16 x half>* %x, <16 x half>* %y) { ; LMULMAX2-LABEL: fmul_v16f16: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-NEXT: vle16.v v8, (a0) ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vfmul.vv v8, v8, v10 @@ -921,7 +921,7 @@ ; ; LMULMAX1-RV32-LABEL: fmul_v16f16: ; 
LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle16.v v9, (a2) @@ -936,7 +936,7 @@ ; ; LMULMAX1-RV64-LABEL: fmul_v16f16: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle16.v v9, (a2) @@ -958,7 +958,7 @@ define void @fmul_v8f32(<8 x float>* %x, <8 x float>* %y) { ; LMULMAX2-LABEL: fmul_v8f32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v8, (a0) ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vfmul.vv v8, v8, v10 @@ -967,7 +967,7 @@ ; ; LMULMAX1-RV32-LABEL: fmul_v8f32: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle32.v v9, (a2) @@ -982,7 +982,7 @@ ; ; LMULMAX1-RV64-LABEL: fmul_v8f32: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle32.v v9, (a2) @@ -1004,7 +1004,7 @@ define void @fmul_v4f64(<4 x double>* %x, <4 x double>* %y) { ; LMULMAX2-LABEL: fmul_v4f64: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-NEXT: vle64.v v8, (a0) ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vfmul.vv v8, v8, v10 @@ -1013,7 +1013,7 @@ ; ; LMULMAX1-RV32-LABEL: fmul_v4f64: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle64.v v9, (a2) @@ -1028,7 +1028,7 @@ ; ; LMULMAX1-RV64-LABEL: fmul_v4f64: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle64.v v9, (a2) @@ -1050,7 +1050,7 @@ define void @fdiv_v16f16(<16 x half>* %x, <16 x half>* %y) { ; LMULMAX2-LABEL: fdiv_v16f16: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-NEXT: vle16.v v8, (a0) ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vfdiv.vv v8, v8, v10 @@ -1059,7 +1059,7 @@ ; ; LMULMAX1-RV32-LABEL: fdiv_v16f16: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle16.v v9, (a2) @@ -1074,7 +1074,7 @@ ; ; LMULMAX1-RV64-LABEL: fdiv_v16f16: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle16.v v9, (a2) @@ -1096,7 +1096,7 @@ define void @fdiv_v8f32(<8 x float>* %x, 
<8 x float>* %y) { ; LMULMAX2-LABEL: fdiv_v8f32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v8, (a0) ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vfdiv.vv v8, v8, v10 @@ -1105,7 +1105,7 @@ ; ; LMULMAX1-RV32-LABEL: fdiv_v8f32: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle32.v v9, (a2) @@ -1120,7 +1120,7 @@ ; ; LMULMAX1-RV64-LABEL: fdiv_v8f32: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle32.v v9, (a2) @@ -1142,7 +1142,7 @@ define void @fdiv_v4f64(<4 x double>* %x, <4 x double>* %y) { ; LMULMAX2-LABEL: fdiv_v4f64: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-NEXT: vle64.v v8, (a0) ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vfdiv.vv v8, v8, v10 @@ -1151,7 +1151,7 @@ ; ; LMULMAX1-RV32-LABEL: fdiv_v4f64: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle64.v v9, (a2) @@ -1166,7 +1166,7 @@ ; ; LMULMAX1-RV64-LABEL: fdiv_v4f64: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle64.v v9, (a2) @@ -1188,7 +1188,7 @@ define void @fneg_v16f16(<16 x half>* %x) { ; LMULMAX2-LABEL: fneg_v16f16: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-NEXT: vle16.v v8, (a0) ; LMULMAX2-NEXT: vfneg.v v8, v8 ; LMULMAX2-NEXT: vse16.v v8, (a0) @@ -1196,7 +1196,7 @@ ; ; LMULMAX1-LABEL: fneg_v16f16: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vle16.v v8, (a1) ; LMULMAX1-NEXT: vle16.v v9, (a0) @@ -1214,7 +1214,7 @@ define void @fneg_v8f32(<8 x float>* %x) { ; LMULMAX2-LABEL: fneg_v8f32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v8, (a0) ; LMULMAX2-NEXT: vfneg.v v8, v8 ; LMULMAX2-NEXT: vse32.v v8, (a0) @@ -1222,7 +1222,7 @@ ; ; LMULMAX1-LABEL: fneg_v8f32: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vle32.v v8, (a1) ; LMULMAX1-NEXT: vle32.v v9, (a0) @@ -1240,7 +1240,7 @@ define void @fneg_v4f64(<4 x double>* %x) { ; LMULMAX2-LABEL: fneg_v4f64: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-NEXT: vle64.v v8, (a0) ; LMULMAX2-NEXT: vfneg.v v8, v8 ; LMULMAX2-NEXT: vse64.v v8, (a0) @@ -1248,7 +1248,7 @@ ; ; LMULMAX1-LABEL: fneg_v4f64: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; 
LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vle64.v v8, (a1) ; LMULMAX1-NEXT: vle64.v v9, (a0) @@ -1266,7 +1266,7 @@ define void @fma_v16f16(<16 x half>* %x, <16 x half>* %y, <16 x half>* %z) { ; LMULMAX2-LABEL: fma_v16f16: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-NEXT: vle16.v v8, (a0) ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vle16.v v12, (a2) @@ -1276,7 +1276,7 @@ ; ; LMULMAX1-LABEL: fma_v16f16: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-NEXT: vle16.v v8, (a0) ; LMULMAX1-NEXT: addi a3, a0, 16 ; LMULMAX1-NEXT: vle16.v v9, (a3) @@ -1303,7 +1303,7 @@ define void @fma_v8f32(<8 x float>* %x, <8 x float>* %y, <8 x float>* %z) { ; LMULMAX2-LABEL: fma_v8f32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v8, (a0) ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vle32.v v12, (a2) @@ -1313,7 +1313,7 @@ ; ; LMULMAX1-LABEL: fma_v8f32: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vle32.v v8, (a0) ; LMULMAX1-NEXT: addi a3, a0, 16 ; LMULMAX1-NEXT: vle32.v v9, (a3) @@ -1340,7 +1340,7 @@ define void @fma_v4f64(<4 x double>* %x, <4 x double>* %y, <4 x double>* %z) { ; LMULMAX2-LABEL: fma_v4f64: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-NEXT: vle64.v v8, (a0) ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vle64.v v12, (a2) @@ -1350,7 +1350,7 @@ ; ; LMULMAX1-LABEL: fma_v4f64: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vle64.v v8, (a0) ; LMULMAX1-NEXT: addi a3, a0, 16 ; LMULMAX1-NEXT: vle64.v v9, (a3) @@ -1377,7 +1377,7 @@ define void @fadd_vf_v8f16(<8 x half>* %x, half %y) { ; CHECK-LABEL: fadd_vf_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: vse16.v v8, (a0) @@ -1393,7 +1393,7 @@ define void @fadd_vf_v4f32(<4 x float>* %x, float %y) { ; CHECK-LABEL: fadd_vf_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: vse32.v v8, (a0) @@ -1409,7 +1409,7 @@ define void @fadd_vf_v2f64(<2 x double>* %x, double %y) { ; CHECK-LABEL: fadd_vf_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: vse64.v v8, (a0) @@ -1425,7 +1425,7 @@ define void @fadd_fv_v8f16(<8 x half>* %x, half %y) { ; CHECK-LABEL: fadd_fv_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: vse16.v v8, (a0) @@ -1441,7 +1441,7 @@ define void @fadd_fv_v4f32(<4 x float>* %x, float %y) { ; CHECK-LABEL: fadd_fv_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, 
e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: vse32.v v8, (a0) @@ -1457,7 +1457,7 @@ define void @fadd_fv_v2f64(<2 x double>* %x, double %y) { ; CHECK-LABEL: fadd_fv_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: vse64.v v8, (a0) @@ -1473,7 +1473,7 @@ define void @fsub_vf_v8f16(<8 x half>* %x, half %y) { ; CHECK-LABEL: fsub_vf_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: vse16.v v8, (a0) @@ -1489,7 +1489,7 @@ define void @fsub_vf_v4f32(<4 x float>* %x, float %y) { ; CHECK-LABEL: fsub_vf_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: vse32.v v8, (a0) @@ -1505,7 +1505,7 @@ define void @fsub_vf_v2f64(<2 x double>* %x, double %y) { ; CHECK-LABEL: fsub_vf_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: vse64.v v8, (a0) @@ -1521,7 +1521,7 @@ define void @fsub_fv_v8f16(<8 x half>* %x, half %y) { ; CHECK-LABEL: fsub_fv_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: vse16.v v8, (a0) @@ -1537,7 +1537,7 @@ define void @fsub_fv_v4f32(<4 x float>* %x, float %y) { ; CHECK-LABEL: fsub_fv_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: vse32.v v8, (a0) @@ -1553,7 +1553,7 @@ define void @fsub_fv_v2f64(<2 x double>* %x, double %y) { ; CHECK-LABEL: fsub_fv_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: vse64.v v8, (a0) @@ -1569,7 +1569,7 @@ define void @fmul_vf_v8f16(<8 x half>* %x, half %y) { ; CHECK-LABEL: fmul_vf_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: vse16.v v8, (a0) @@ -1585,7 +1585,7 @@ define void @fmul_vf_v4f32(<4 x float>* %x, float %y) { ; CHECK-LABEL: fmul_vf_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: vse32.v v8, (a0) @@ -1601,7 +1601,7 @@ define void @fmul_vf_v2f64(<2 x double>* %x, double %y) { ; CHECK-LABEL: fmul_vf_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: vse64.v v8, (a0) @@ -1617,7 +1617,7 @@ define void @fmul_fv_v8f16(<8 x half>* %x, half %y) { ; CHECK-LABEL: fmul_fv_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; 
CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: vse16.v v8, (a0) @@ -1633,7 +1633,7 @@ define void @fmul_fv_v4f32(<4 x float>* %x, float %y) { ; CHECK-LABEL: fmul_fv_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: vse32.v v8, (a0) @@ -1649,7 +1649,7 @@ define void @fmul_fv_v2f64(<2 x double>* %x, double %y) { ; CHECK-LABEL: fmul_fv_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: vse64.v v8, (a0) @@ -1665,7 +1665,7 @@ define void @fdiv_vf_v8f16(<8 x half>* %x, half %y) { ; CHECK-LABEL: fdiv_vf_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: vse16.v v8, (a0) @@ -1681,7 +1681,7 @@ define void @fdiv_vf_v4f32(<4 x float>* %x, float %y) { ; CHECK-LABEL: fdiv_vf_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: vse32.v v8, (a0) @@ -1697,7 +1697,7 @@ define void @fdiv_vf_v2f64(<2 x double>* %x, double %y) { ; CHECK-LABEL: fdiv_vf_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: vse64.v v8, (a0) @@ -1713,7 +1713,7 @@ define void @fdiv_fv_v8f16(<8 x half>* %x, half %y) { ; CHECK-LABEL: fdiv_fv_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: vse16.v v8, (a0) @@ -1729,7 +1729,7 @@ define void @fdiv_fv_v4f32(<4 x float>* %x, float %y) { ; CHECK-LABEL: fdiv_fv_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: vse32.v v8, (a0) @@ -1745,7 +1745,7 @@ define void @fdiv_fv_v2f64(<2 x double>* %x, double %y) { ; CHECK-LABEL: fdiv_fv_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: vse64.v v8, (a0) @@ -1761,7 +1761,7 @@ define void @fma_vf_v8f16(<8 x half>* %x, <8 x half>* %y, half %z) { ; CHECK-LABEL: fma_vf_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vfmacc.vf v9, fa0, v8 @@ -1779,7 +1779,7 @@ define void @fma_vf_v4f32(<4 x float>* %x, <4 x float>* %y, float %z) { ; CHECK-LABEL: fma_vf_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vfmacc.vf v9, fa0, v8 @@ -1797,7 +1797,7 @@ define void @fma_vf_v2f64(<2 x double>* %x, <2 x double>* %y, double %z) { ; CHECK-LABEL: fma_vf_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: 
vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vfmacc.vf v9, fa0, v8 @@ -1815,7 +1815,7 @@ define void @fma_fv_v8f16(<8 x half>* %x, <8 x half>* %y, half %z) { ; CHECK-LABEL: fma_fv_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vfmacc.vf v9, fa0, v8 @@ -1833,7 +1833,7 @@ define void @fma_fv_v4f32(<4 x float>* %x, <4 x float>* %y, float %z) { ; CHECK-LABEL: fma_fv_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vfmacc.vf v9, fa0, v8 @@ -1851,7 +1851,7 @@ define void @fma_fv_v2f64(<2 x double>* %x, <2 x double>* %y, double %z) { ; CHECK-LABEL: fma_fv_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vfmacc.vf v9, fa0, v8 @@ -1869,7 +1869,7 @@ define void @fmsub_vf_v8f16(<8 x half>* %x, <8 x half>* %y, half %z) { ; CHECK-LABEL: fmsub_vf_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vfmsac.vf v9, fa0, v8 @@ -1888,7 +1888,7 @@ define void @fnmsub_vf_v4f32(<4 x float>* %x, <4 x float>* %y, float %z) { ; CHECK-LABEL: fnmsub_vf_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vfnmsac.vf v9, fa0, v8 @@ -1907,7 +1907,7 @@ define void @fnmadd_vf_v2f64(<2 x double>* %x, <2 x double>* %y, double %z) { ; CHECK-LABEL: fnmadd_vf_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vfnmacc.vf v9, fa0, v8 @@ -1927,7 +1927,7 @@ define void @fnmsub_fv_v4f32(<4 x float>* %x, <4 x float>* %y, float %z) { ; CHECK-LABEL: fnmsub_fv_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vfnmsac.vf v9, fa0, v8 @@ -1946,7 +1946,7 @@ define void @fnmadd_fv_v2f64(<2 x double>* %x, <2 x double>* %y, double %z) { ; CHECK-LABEL: fnmadd_fv_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vfnmacc.vf v9, fa0, v8 @@ -2236,7 +2236,7 @@ define void @fmuladd_v8f16(<8 x half>* %x, <8 x half>* %y, <8 x half>* %z) { ; CHECK-LABEL: fmuladd_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vle16.v v10, (a2) @@ -2255,7 +2255,7 @@ define void @fmuladd_v4f32(<4 x float>* %x, <4 x float>* %y, <4 x float>* %z) { ; CHECK-LABEL: fmuladd_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vle32.v v10, (a2) @@ -2274,7 
+2274,7 @@ define void @fmuladd_v2f64(<2 x double>* %x, <2 x double>* %y, <2 x double>* %z) { ; CHECK-LABEL: fmuladd_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vle64.v v10, (a2) @@ -2293,7 +2293,7 @@ define void @fmsub_fmuladd_v8f16(<8 x half>* %x, <8 x half>* %y, <8 x half>* %z) { ; CHECK-LABEL: fmsub_fmuladd_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vle16.v v10, (a2) @@ -2312,7 +2312,7 @@ define void @fnmsub_fmuladd_v4f32(<4 x float>* %x, <4 x float>* %y, <4 x float>* %z) { ; CHECK-LABEL: fnmsub_fmuladd_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vle32.v v10, (a2) @@ -2331,7 +2331,7 @@ define void @fnmadd_fmuladd_v2f64(<2 x double>* %x, <2 x double>* %y, <2 x double>* %z) { ; CHECK-LABEL: fnmadd_fmuladd_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vle64.v v10, (a2) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll @@ -5,7 +5,7 @@ define void @fp2si_v2f32_v2i32(<2 x float>* %x, <2 x i32>* %y) { ; CHECK-LABEL: fp2si_v2f32_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 @@ -22,7 +22,7 @@ define void @fp2ui_v2f32_v2i32(<2 x float>* %x, <2 x i32>* %y) { ; CHECK-LABEL: fp2ui_v2f32_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 @@ -40,7 +40,7 @@ ; ; CHECK-LABEL: fp2si_v8f32_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 @@ -58,7 +58,7 @@ ; ; CHECK-LABEL: fp2ui_v8f32_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 @@ -75,11 +75,11 @@ define void @fp2si_v2f32_v2i64(<2 x float>* %x, <2 x i64>* %y) { ; CHECK-LABEL: fp2si_v2f32_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: vfwcvt.rtz.x.f.v v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; CHECK-NEXT: vmerge.vim v8, v9, 0, v0 ; CHECK-NEXT: vse64.v v8, (a1) ; CHECK-NEXT: ret @@ -93,11 +93,11 @@ define void @fp2ui_v2f32_v2i64(<2 x float>* %x, <2 x i64>* %y) { ; CHECK-LABEL: fp2ui_v2f32_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli 
zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; CHECK-NEXT: vmerge.vim v8, v9, 0, v0 ; CHECK-NEXT: vse64.v v8, (a1) ; CHECK-NEXT: ret @@ -112,11 +112,11 @@ ; ; CHECK-LABEL: fp2si_v8f32_v8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: vfwcvt.rtz.x.f.v v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; CHECK-NEXT: vmerge.vim v8, v12, 0, v0 ; CHECK-NEXT: vse64.v v8, (a1) ; CHECK-NEXT: ret @@ -131,11 +131,11 @@ ; ; CHECK-LABEL: fp2ui_v8f32_v8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; CHECK-NEXT: vmerge.vim v8, v12, 0, v0 ; CHECK-NEXT: vse64.v v8, (a1) ; CHECK-NEXT: ret @@ -149,13 +149,13 @@ define void @fp2si_v2f16_v2i64(<2 x half>* %x, <2 x i64>* %y) { ; CHECK-LABEL: fp2si_v2f16_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: vfwcvt.f.f.v v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 ; CHECK-NEXT: vse64.v v8, (a1) ; CHECK-NEXT: ret @@ -169,13 +169,13 @@ define void @fp2ui_v2f16_v2i64(<2 x half>* %x, <2 x i64>* %y) { ; CHECK-LABEL: fp2ui_v2f16_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: vfwcvt.f.f.v v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 ; CHECK-NEXT: vse64.v v8, (a1) ; CHECK-NEXT: ret @@ -189,9 +189,9 @@ define void @fp2si_v2f64_v2i8(<2 x double>* %x, <2 x i8>* %y) { ; RV32-LABEL: fp2si_v2f64_v2i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vslidedown.vi v9, v8, 1 ; RV32-NEXT: lui a0, %hi(.LCPI10_0) ; RV32-NEXT: fld ft0, %lo(.LCPI10_0)(a0) @@ -205,9 +205,9 @@ ; RV32-NEXT: fmin.d ft2, ft2, ft1 ; RV32-NEXT: fcvt.w.d a0, ft2, rtz ; RV32-NEXT: .LBB10_2: -; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; RV32-NEXT: vmv.v.x v9, a0 -; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV32-NEXT: vfmv.f.s ft2, v8 ; RV32-NEXT: feq.d a0, ft2, ft2 ; RV32-NEXT: beqz a0, .LBB10_4 @@ 
-216,16 +216,16 @@ ; RV32-NEXT: fmin.d ft0, ft0, ft1 ; RV32-NEXT: fcvt.w.d a0, ft0, rtz ; RV32-NEXT: .LBB10_4: -; RV32-NEXT: vsetivli zero, 2, e8, mf8, tu, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf8, tu, ma ; RV32-NEXT: vmv.s.x v9, a0 ; RV32-NEXT: vse8.v v9, (a1) ; RV32-NEXT: ret ; ; RV64-LABEL: fp2si_v2f64_v2i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vslidedown.vi v9, v8, 1 ; RV64-NEXT: lui a0, %hi(.LCPI10_0) ; RV64-NEXT: fld ft0, %lo(.LCPI10_0)(a0) @@ -239,9 +239,9 @@ ; RV64-NEXT: fmin.d ft2, ft2, ft1 ; RV64-NEXT: fcvt.l.d a0, ft2, rtz ; RV64-NEXT: .LBB10_2: -; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; RV64-NEXT: vmv.v.x v9, a0 -; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV64-NEXT: vfmv.f.s ft2, v8 ; RV64-NEXT: feq.d a0, ft2, ft2 ; RV64-NEXT: beqz a0, .LBB10_4 @@ -250,7 +250,7 @@ ; RV64-NEXT: fmin.d ft0, ft0, ft1 ; RV64-NEXT: fcvt.l.d a0, ft0, rtz ; RV64-NEXT: .LBB10_4: -; RV64-NEXT: vsetivli zero, 2, e8, mf8, tu, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf8, tu, ma ; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vse8.v v9, (a1) ; RV64-NEXT: ret @@ -264,7 +264,7 @@ define void @fp2ui_v2f64_v2i8(<2 x double>* %x, <2 x i8>* %y) { ; RV32-LABEL: fp2ui_v2f64_v2i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: lui a0, %hi(.LCPI11_0) ; RV32-NEXT: fld ft0, %lo(.LCPI11_0)(a0) @@ -273,22 +273,22 @@ ; RV32-NEXT: fmax.d ft1, ft1, ft2 ; RV32-NEXT: fmin.d ft1, ft1, ft0 ; RV32-NEXT: fcvt.wu.d a0, ft1, rtz -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vslidedown.vi v8, v8, 1 ; RV32-NEXT: vfmv.f.s ft1, v8 ; RV32-NEXT: fmax.d ft1, ft1, ft2 ; RV32-NEXT: fmin.d ft0, ft1, ft0 ; RV32-NEXT: fcvt.wu.d a2, ft0, rtz -; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; RV32-NEXT: vmv.v.x v8, a2 -; RV32-NEXT: vsetvli zero, zero, e8, mf8, tu, mu +; RV32-NEXT: vsetvli zero, zero, e8, mf8, tu, ma ; RV32-NEXT: vmv.s.x v8, a0 ; RV32-NEXT: vse8.v v8, (a1) ; RV32-NEXT: ret ; ; RV64-LABEL: fp2ui_v2f64_v2i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: lui a0, %hi(.LCPI11_0) ; RV64-NEXT: fld ft0, %lo(.LCPI11_0)(a0) @@ -297,15 +297,15 @@ ; RV64-NEXT: fmax.d ft1, ft1, ft2 ; RV64-NEXT: fmin.d ft1, ft1, ft0 ; RV64-NEXT: fcvt.lu.d a0, ft1, rtz -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vslidedown.vi v8, v8, 1 ; RV64-NEXT: vfmv.f.s ft1, v8 ; RV64-NEXT: fmax.d ft1, ft1, ft2 ; RV64-NEXT: fmin.d ft0, ft1, ft0 ; RV64-NEXT: fcvt.lu.d a2, ft0, rtz -; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; RV64-NEXT: vmv.v.x v8, a2 -; RV64-NEXT: vsetvli zero, zero, e8, mf8, tu, mu +; RV64-NEXT: vsetvli zero, zero, e8, mf8, tu, ma ; RV64-NEXT: vmv.s.x v8, a0 ; RV64-NEXT: vse8.v v8, (a1) ; RV64-NEXT: ret @@ -322,7 +322,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; 
RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: lui a0, %hi(.LCPI12_0) ; RV32-NEXT: fld ft0, %lo(.LCPI12_0)(a0) @@ -337,7 +337,7 @@ ; RV32-NEXT: fcvt.w.d a0, ft2, rtz ; RV32-NEXT: .LBB12_2: ; RV32-NEXT: sb a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m4, ta, ma ; RV32-NEXT: vslidedown.vi v12, v8, 7 ; RV32-NEXT: vfmv.f.s ft2, v12 ; RV32-NEXT: feq.d a0, ft2, ft2 @@ -427,7 +427,7 @@ ; RV32-NEXT: .LBB12_22: ; RV32-NEXT: sb a0, 9(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV32-NEXT: vle8.v v8, (a0) ; RV32-NEXT: vse8.v v8, (a1) ; RV32-NEXT: addi sp, sp, 16 @@ -437,7 +437,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: addi sp, sp, -16 ; RV64-NEXT: .cfi_def_cfa_offset 16 -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: lui a0, %hi(.LCPI12_0) ; RV64-NEXT: fld ft0, %lo(.LCPI12_0)(a0) @@ -452,7 +452,7 @@ ; RV64-NEXT: fcvt.l.d a0, ft2, rtz ; RV64-NEXT: .LBB12_2: ; RV64-NEXT: sb a0, 8(sp) -; RV64-NEXT: vsetivli zero, 1, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m4, ta, ma ; RV64-NEXT: vslidedown.vi v12, v8, 7 ; RV64-NEXT: vfmv.f.s ft2, v12 ; RV64-NEXT: feq.d a0, ft2, ft2 @@ -542,7 +542,7 @@ ; RV64-NEXT: .LBB12_22: ; RV64-NEXT: sb a0, 9(sp) ; RV64-NEXT: addi a0, sp, 8 -; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64-NEXT: vle8.v v8, (a0) ; RV64-NEXT: vse8.v v8, (a1) ; RV64-NEXT: addi sp, sp, 16 @@ -560,7 +560,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: lui a0, %hi(.LCPI13_0) ; RV32-NEXT: fld ft0, %lo(.LCPI13_0)(a0) @@ -570,7 +570,7 @@ ; RV32-NEXT: fmin.d ft1, ft1, ft0 ; RV32-NEXT: fcvt.wu.d a0, ft1, rtz ; RV32-NEXT: sb a0, 8(sp) -; RV32-NEXT: vsetivli zero, 1, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m4, ta, ma ; RV32-NEXT: vslidedown.vi v12, v8, 7 ; RV32-NEXT: vfmv.f.s ft1, v12 ; RV32-NEXT: fmax.d ft1, ft1, ft2 @@ -614,7 +614,7 @@ ; RV32-NEXT: fcvt.wu.d a0, ft0, rtz ; RV32-NEXT: sb a0, 9(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV32-NEXT: vle8.v v8, (a0) ; RV32-NEXT: vse8.v v8, (a1) ; RV32-NEXT: addi sp, sp, 16 @@ -624,7 +624,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: addi sp, sp, -16 ; RV64-NEXT: .cfi_def_cfa_offset 16 -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: lui a0, %hi(.LCPI13_0) ; RV64-NEXT: fld ft0, %lo(.LCPI13_0)(a0) @@ -634,7 +634,7 @@ ; RV64-NEXT: fmin.d ft1, ft1, ft0 ; RV64-NEXT: fcvt.lu.d a0, ft1, rtz ; RV64-NEXT: sb a0, 8(sp) -; RV64-NEXT: vsetivli zero, 1, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m4, ta, ma ; RV64-NEXT: vslidedown.vi v12, v8, 7 ; RV64-NEXT: vfmv.f.s ft1, v12 ; RV64-NEXT: fmax.d ft1, ft1, ft2 @@ -678,7 +678,7 @@ ; RV64-NEXT: fcvt.lu.d a0, ft0, rtz ; RV64-NEXT: sb a0, 9(sp) ; RV64-NEXT: addi a0, sp, 8 -; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64-NEXT: vle8.v v8, (a0) ; RV64-NEXT: vse8.v v8, (a1) ; RV64-NEXT: addi sp, sp, 16 @@ -693,10 +693,10 @@ define void @fp2si_v2f64_v2i32(<2 x double>* %x, <2 x i32>* %y) { ; CHECK-LABEL: fp2si_v2f64_v2i32: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vmerge.vim v8, v9, 0, v0 ; CHECK-NEXT: vse32.v v8, (a1) @@ -711,10 +711,10 @@ define void @fp2ui_v2f64_v2i32(<2 x double>* %x, <2 x i32>* %y) { ; CHECK-LABEL: fp2ui_v2f64_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vmerge.vim v8, v9, 0, v0 ; CHECK-NEXT: vse32.v v8, (a1) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll @@ -7,7 +7,7 @@ define void @fp2si_v2f32_v2i32(<2 x float>* %x, <2 x i32>* %y) { ; CHECK-LABEL: fp2si_v2f32_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: vse32.v v8, (a1) @@ -21,7 +21,7 @@ define void @fp2ui_v2f32_v2i32(<2 x float>* %x, <2 x i32>* %y) { ; CHECK-LABEL: fp2ui_v2f32_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: vse32.v v8, (a1) @@ -35,7 +35,7 @@ define <2 x i1> @fp2si_v2f32_v2i1(<2 x float> %x) { ; CHECK-LABEL: fp2si_v2f32_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -47,7 +47,7 @@ define <2 x i15> @fp2si_v2f32_v2i15(<2 x float> %x) { ; CHECK-LABEL: fp2si_v2f32_v2i15: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -58,7 +58,7 @@ define <2 x i15> @fp2ui_v2f32_v2i15(<2 x float> %x) { ; CHECK-LABEL: fp2ui_v2f32_v2i15: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -69,7 +69,7 @@ define <2 x i1> @fp2ui_v2f32_v2i1(<2 x float> %x) { ; CHECK-LABEL: fp2ui_v2f32_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -81,7 +81,7 @@ define void @fp2si_v8f32_v8i32(<8 x float>* %x, <8 x i32>* %y) { ; LMULMAX8-LABEL: fp2si_v8f32_v8i32: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX8-NEXT: vle32.v v8, (a0) ; LMULMAX8-NEXT: vfcvt.rtz.x.f.v v8, v8 ; LMULMAX8-NEXT: vse32.v v8, (a1) @@ -89,7 +89,7 @@ ; ; LMULMAX1-LABEL: fp2si_v8f32_v8i32: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: 
vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: addi a2, a0, 16 ; LMULMAX1-NEXT: vle32.v v8, (a2) ; LMULMAX1-NEXT: vle32.v v9, (a0) @@ -108,7 +108,7 @@ define void @fp2ui_v8f32_v8i32(<8 x float>* %x, <8 x i32>* %y) { ; LMULMAX8-LABEL: fp2ui_v8f32_v8i32: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX8-NEXT: vle32.v v8, (a0) ; LMULMAX8-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; LMULMAX8-NEXT: vse32.v v8, (a1) @@ -116,7 +116,7 @@ ; ; LMULMAX1-LABEL: fp2ui_v8f32_v8i32: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: addi a2, a0, 16 ; LMULMAX1-NEXT: vle32.v v8, (a2) ; LMULMAX1-NEXT: vle32.v v9, (a0) @@ -135,7 +135,7 @@ define <8 x i1> @fp2si_v8f32_v8i1(<8 x float> %x) { ; LMULMAX8-LABEL: fp2si_v8f32_v8i1: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX8-NEXT: vfncvt.rtz.x.f.w v10, v8 ; LMULMAX8-NEXT: vand.vi v8, v10, 1 ; LMULMAX8-NEXT: vmsne.vi v0, v8, 0 @@ -143,23 +143,23 @@ ; ; LMULMAX1-LABEL: fp2si_v8f32_v8i1: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v10, v8 ; LMULMAX1-NEXT: vand.vi v8, v10, 1 ; LMULMAX1-NEXT: vmsne.vi v0, v8, 0 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX1-NEXT: vmv.v.i v8, 0 ; LMULMAX1-NEXT: vmerge.vim v8, v8, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v10, v9 ; LMULMAX1-NEXT: vand.vi v9, v10, 1 ; LMULMAX1-NEXT: vmsne.vi v0, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; LMULMAX1-NEXT: vmv.v.i v9, 0 ; LMULMAX1-NEXT: vmerge.vim v9, v9, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; LMULMAX1-NEXT: vmsne.vi v0, v8, 0 ; LMULMAX1-NEXT: ret %z = fptosi <8 x float> %x to <8 x i1> @@ -169,7 +169,7 @@ define <8 x i1> @fp2ui_v8f32_v8i1(<8 x float> %x) { ; LMULMAX8-LABEL: fp2ui_v8f32_v8i1: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX8-NEXT: vfncvt.rtz.xu.f.w v10, v8 ; LMULMAX8-NEXT: vand.vi v8, v10, 1 ; LMULMAX8-NEXT: vmsne.vi v0, v8, 0 @@ -177,23 +177,23 @@ ; ; LMULMAX1-LABEL: fp2ui_v8f32_v8i1: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v10, v8 ; LMULMAX1-NEXT: vand.vi v8, v10, 1 ; LMULMAX1-NEXT: vmsne.vi v0, v8, 0 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX1-NEXT: vmv.v.i v8, 0 ; LMULMAX1-NEXT: vmerge.vim v8, v8, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v10, v9 ; LMULMAX1-NEXT: vand.vi v9, v10, 1 ; LMULMAX1-NEXT: vmsne.vi v0, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; LMULMAX1-NEXT: vmv.v.i v9, 0 ; LMULMAX1-NEXT: 
vmerge.vim v9, v9, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; LMULMAX1-NEXT: vmsne.vi v0, v8, 0 ; LMULMAX1-NEXT: ret %z = fptoui <8 x float> %x to <8 x i1> @@ -203,7 +203,7 @@ define void @fp2si_v2f32_v2i64(<2 x float>* %x, <2 x i64>* %y) { ; CHECK-LABEL: fp2si_v2f32_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfwcvt.rtz.x.f.v v9, v8 ; CHECK-NEXT: vse64.v v9, (a1) @@ -217,7 +217,7 @@ define void @fp2ui_v2f32_v2i64(<2 x float>* %x, <2 x i64>* %y) { ; CHECK-LABEL: fp2ui_v2f32_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v9, v8 ; CHECK-NEXT: vse64.v v9, (a1) @@ -231,7 +231,7 @@ define void @fp2si_v8f32_v8i64(<8 x float>* %x, <8 x i64>* %y) { ; LMULMAX8-LABEL: fp2si_v8f32_v8i64: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX8-NEXT: vle32.v v8, (a0) ; LMULMAX8-NEXT: vfwcvt.rtz.x.f.v v12, v8 ; LMULMAX8-NEXT: vse64.v v12, (a1) @@ -239,17 +239,17 @@ ; ; LMULMAX1-LABEL: fp2si_v8f32_v8i64: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: addi a2, a0, 16 ; LMULMAX1-NEXT: vle32.v v8, (a2) ; LMULMAX1-NEXT: vle32.v v9, (a0) -; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v10, v8, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vfwcvt.rtz.x.f.v v11, v10 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v10, v9, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vfwcvt.rtz.x.f.v v12, v10 ; LMULMAX1-NEXT: vfwcvt.rtz.x.f.v v10, v8 ; LMULMAX1-NEXT: vfwcvt.rtz.x.f.v v8, v9 @@ -270,7 +270,7 @@ define void @fp2ui_v8f32_v8i64(<8 x float>* %x, <8 x i64>* %y) { ; LMULMAX8-LABEL: fp2ui_v8f32_v8i64: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX8-NEXT: vle32.v v8, (a0) ; LMULMAX8-NEXT: vfwcvt.rtz.xu.f.v v12, v8 ; LMULMAX8-NEXT: vse64.v v12, (a1) @@ -278,17 +278,17 @@ ; ; LMULMAX1-LABEL: fp2ui_v8f32_v8i64: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: addi a2, a0, 16 ; LMULMAX1-NEXT: vle32.v v8, (a2) ; LMULMAX1-NEXT: vle32.v v9, (a0) -; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v10, v8, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vfwcvt.rtz.xu.f.v v11, v10 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v10, v9, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, 
ta, ma ; LMULMAX1-NEXT: vfwcvt.rtz.xu.f.v v12, v10 ; LMULMAX1-NEXT: vfwcvt.rtz.xu.f.v v10, v8 ; LMULMAX1-NEXT: vfwcvt.rtz.xu.f.v v8, v9 @@ -309,10 +309,10 @@ define void @fp2si_v2f16_v2i64(<2 x half>* %x, <2 x i64>* %y) { ; CHECK-LABEL: fp2si_v2f16_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfwcvt.f.f.v v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v9 ; CHECK-NEXT: vse64.v v8, (a1) ; CHECK-NEXT: ret @@ -325,10 +325,10 @@ define void @fp2ui_v2f16_v2i64(<2 x half>* %x, <2 x i64>* %y) { ; CHECK-LABEL: fp2ui_v2f16_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfwcvt.f.f.v v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v9 ; CHECK-NEXT: vse64.v v8, (a1) ; CHECK-NEXT: ret @@ -341,7 +341,7 @@ define <2 x i1> @fp2si_v2f16_v2i1(<2 x half> %x) { ; CHECK-LABEL: fp2si_v2f16_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -353,7 +353,7 @@ define <2 x i1> @fp2ui_v2f16_v2i1(<2 x half> %x) { ; CHECK-LABEL: fp2ui_v2f16_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -365,12 +365,12 @@ define void @fp2si_v2f64_v2i8(<2 x double>* %x, <2 x i8>* %y) { ; CHECK-LABEL: fp2si_v2f64_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v9, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a1) ; CHECK-NEXT: ret @@ -383,12 +383,12 @@ define void @fp2ui_v2f64_v2i8(<2 x double>* %x, <2 x i8>* %y) { ; CHECK-LABEL: fp2ui_v2f64_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v9, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a1) ; CHECK-NEXT: ret @@ -401,7 +401,7 @@ define <2 x i1> @fp2si_v2f64_v2i1(<2 x double> %x) { ; CHECK-LABEL: fp2si_v2f64_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -413,7 +413,7 @@ define <2 x i1> @fp2ui_v2f64_v2i1(<2 x double> %x) { ; CHECK-LABEL: fp2ui_v2f64_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli 
zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -425,12 +425,12 @@ define void @fp2si_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) { ; LMULMAX8-LABEL: fp2si_v8f64_v8i8: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX8-NEXT: vle64.v v8, (a0) ; LMULMAX8-NEXT: vfncvt.rtz.x.f.w v12, v8 -; LMULMAX8-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; LMULMAX8-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; LMULMAX8-NEXT: vnsrl.wi v8, v12, 0 -; LMULMAX8-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; LMULMAX8-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; LMULMAX8-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX8-NEXT: vse8.v v8, (a1) ; LMULMAX8-NEXT: ret @@ -438,7 +438,7 @@ ; LMULMAX1-LABEL: fp2si_v8f64_v8i8: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: addi a2, a0, 48 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vle64.v v8, (a2) ; LMULMAX1-NEXT: addi a2, a0, 32 ; LMULMAX1-NEXT: vle64.v v9, (a0) @@ -446,33 +446,33 @@ ; LMULMAX1-NEXT: addi a0, a0, 16 ; LMULMAX1-NEXT: vle64.v v11, (a0) ; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v12, v9 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v12, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v12, v11 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v11, v12, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v11, v11, 0 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, ma ; LMULMAX1-NEXT: vslideup.vi v9, v11, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v11, v10 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v10, v11, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v10, v10, 0 -; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, ma ; LMULMAX1-NEXT: vslideup.vi v9, v10, 4 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v10, v8 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v8, v10, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, ma ; LMULMAX1-NEXT: vslideup.vi v9, v8, 6 ; LMULMAX1-NEXT: vse8.v v9, (a1) ; LMULMAX1-NEXT: ret @@ -485,12 +485,12 @@ define void @fp2ui_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) { ; LMULMAX8-LABEL: fp2ui_v8f64_v8i8: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; 
LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX8-NEXT: vle64.v v8, (a0) ; LMULMAX8-NEXT: vfncvt.rtz.xu.f.w v12, v8 -; LMULMAX8-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; LMULMAX8-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; LMULMAX8-NEXT: vnsrl.wi v8, v12, 0 -; LMULMAX8-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; LMULMAX8-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; LMULMAX8-NEXT: vnsrl.wi v8, v8, 0 ; LMULMAX8-NEXT: vse8.v v8, (a1) ; LMULMAX8-NEXT: ret @@ -498,7 +498,7 @@ ; LMULMAX1-LABEL: fp2ui_v8f64_v8i8: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: addi a2, a0, 48 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vle64.v v8, (a2) ; LMULMAX1-NEXT: addi a2, a0, 32 ; LMULMAX1-NEXT: vle64.v v9, (a0) @@ -506,33 +506,33 @@ ; LMULMAX1-NEXT: addi a0, a0, 16 ; LMULMAX1-NEXT: vle64.v v11, (a0) ; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v12, v9 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v12, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v12, v11 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v11, v12, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v11, v11, 0 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, ma ; LMULMAX1-NEXT: vslideup.vi v9, v11, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v11, v10 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v10, v11, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v10, v10, 0 -; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, ma ; LMULMAX1-NEXT: vslideup.vi v9, v10, 4 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v10, v8 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v8, v10, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, ma ; LMULMAX1-NEXT: vslideup.vi v9, v8, 6 ; LMULMAX1-NEXT: vse8.v v9, (a1) ; LMULMAX1-NEXT: ret @@ -545,7 +545,7 @@ define <8 x i1> @fp2si_v8f64_v8i1(<8 x double> %x) { ; LMULMAX8-LABEL: fp2si_v8f64_v8i1: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX8-NEXT: vfncvt.rtz.x.f.w v12, v8 ; LMULMAX8-NEXT: vand.vi v8, v12, 1 ; LMULMAX8-NEXT: vmsne.vi v0, v8, 0 @@ -553,45 +553,45 @@ ; ; LMULMAX1-LABEL: fp2si_v8f64_v8i1: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, 
ta, ma ; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v12, v8 ; LMULMAX1-NEXT: vand.vi v8, v12, 1 ; LMULMAX1-NEXT: vmsne.vi v0, v8, 0 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX1-NEXT: vmv.v.i v8, 0 ; LMULMAX1-NEXT: vmerge.vim v12, v8, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v13, v9 ; LMULMAX1-NEXT: vand.vi v9, v13, 1 ; LMULMAX1-NEXT: vmsne.vi v0, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; LMULMAX1-NEXT: vmv.v.i v9, 0 ; LMULMAX1-NEXT: vmerge.vim v13, v9, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, ma ; LMULMAX1-NEXT: vslideup.vi v12, v13, 2 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX1-NEXT: vmsne.vi v0, v12, 0 ; LMULMAX1-NEXT: vmerge.vim v12, v8, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v13, v10 ; LMULMAX1-NEXT: vand.vi v10, v13, 1 ; LMULMAX1-NEXT: vmsne.vi v0, v10, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; LMULMAX1-NEXT: vmerge.vim v10, v9, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, ma ; LMULMAX1-NEXT: vslideup.vi v12, v10, 4 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX1-NEXT: vmsne.vi v0, v12, 0 ; LMULMAX1-NEXT: vmerge.vim v8, v8, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v10, v11 ; LMULMAX1-NEXT: vand.vi v10, v10, 1 ; LMULMAX1-NEXT: vmsne.vi v0, v10, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; LMULMAX1-NEXT: vmerge.vim v9, v9, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v9, 6 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; LMULMAX1-NEXT: vmsne.vi v0, v8, 0 ; LMULMAX1-NEXT: ret %z = fptosi <8 x double> %x to <8 x i1> @@ -601,7 +601,7 @@ define <8 x i1> @fp2ui_v8f64_v8i1(<8 x double> %x) { ; LMULMAX8-LABEL: fp2ui_v8f64_v8i1: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX8-NEXT: vfncvt.rtz.xu.f.w v12, v8 ; LMULMAX8-NEXT: vand.vi v8, v12, 1 ; LMULMAX8-NEXT: vmsne.vi v0, v8, 0 @@ -609,45 +609,45 @@ ; ; LMULMAX1-LABEL: fp2ui_v8f64_v8i1: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v12, v8 ; LMULMAX1-NEXT: vand.vi v8, v12, 1 ; LMULMAX1-NEXT: vmsne.vi v0, v8, 0 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX1-NEXT: vmv.v.i v8, 0 ; LMULMAX1-NEXT: vmerge.vim v12, v8, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v13, v9 ; LMULMAX1-NEXT: vand.vi v9, v13, 1 ; LMULMAX1-NEXT: vmsne.vi v0, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, 
mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; LMULMAX1-NEXT: vmv.v.i v9, 0 ; LMULMAX1-NEXT: vmerge.vim v13, v9, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, ma ; LMULMAX1-NEXT: vslideup.vi v12, v13, 2 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX1-NEXT: vmsne.vi v0, v12, 0 ; LMULMAX1-NEXT: vmerge.vim v12, v8, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v13, v10 ; LMULMAX1-NEXT: vand.vi v10, v13, 1 ; LMULMAX1-NEXT: vmsne.vi v0, v10, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; LMULMAX1-NEXT: vmerge.vim v10, v9, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, ma ; LMULMAX1-NEXT: vslideup.vi v12, v10, 4 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX1-NEXT: vmsne.vi v0, v12, 0 ; LMULMAX1-NEXT: vmerge.vim v8, v8, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v10, v11 ; LMULMAX1-NEXT: vand.vi v10, v10, 1 ; LMULMAX1-NEXT: vmsne.vi v0, v10, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; LMULMAX1-NEXT: vmerge.vim v9, v9, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v9, 6 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; LMULMAX1-NEXT: vmsne.vi v0, v8, 0 ; LMULMAX1-NEXT: ret %z = fptoui <8 x double> %x to <8 x i1> diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp-mask.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp-mask.ll @@ -21,7 +21,7 @@ define <4 x i1> @vfptosi_v4i1_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i1_v4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -46,7 +46,7 @@ define <4 x i1> @vfptosi_v4i1_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i1_v4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -72,7 +72,7 @@ define <4 x i1> @vfptosi_v4i1_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i1_v4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll @@ -33,7 +33,7 @@ define <4 x i8> @vfptosi_v4i8_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ; 
CHECK-LABEL: vfptosi_v4i8_v4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -56,7 +56,7 @@ define <4 x i16> @vfptosi_v4i16_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i16_v4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.vp.fptosi.v4i16.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) @@ -79,7 +79,7 @@ define <4 x i32> @vfptosi_v4i32_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i32_v4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwcvt.rtz.x.f.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -104,9 +104,9 @@ define <4 x i64> @vfptosi_v4i64_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i64_v4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v10 ; CHECK-NEXT: ret %v = call <4 x i64> @llvm.vp.fptosi.v4i64.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) @@ -130,9 +130,9 @@ define <4 x i8> @vfptosi_v4i8_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i8_v4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v9, 0 ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.fptosi.v4i8.v4f32(<4 x float> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) @@ -155,7 +155,7 @@ define <4 x i16> @vfptosi_v4i16_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i16_v4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -178,7 +178,7 @@ define <4 x i32> @vfptosi_v4i32_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i32_v4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: ret %v = call <4 x i32> @llvm.vp.fptosi.v4i32.v4f32(<4 x float> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) @@ -201,7 +201,7 @@ define <4 x i64> @vfptosi_v4i64_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i64_v4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, 
ta, ma ; CHECK-NEXT: vfwcvt.rtz.x.f.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -228,11 +228,11 @@ define <4 x i8> @vfptosi_v4i8_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i8_v4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.fptosi.v4i8.v4f64(<4 x double> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) @@ -256,9 +256,9 @@ define <4 x i16> @vfptosi_v4i16_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i16_v4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.vp.fptosi.v4i16.v4f64(<4 x double> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) @@ -281,7 +281,7 @@ define <4 x i32> @vfptosi_v4i32_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i32_v4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -304,7 +304,7 @@ define <4 x i64> @vfptosi_v4i64_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_v4i64_v4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: ret %v = call <4 x i64> @llvm.vp.fptosi.v4i64.v4f64(<4 x double> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) @@ -318,7 +318,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: addi a2, a0, -16 ; CHECK-NEXT: vslidedown.vi v0, v0, 2 ; CHECK-NEXT: bltu a0, a2, .LBB25_2 @@ -349,14 +349,14 @@ ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a2, a1 ; CHECK-NEXT: .LBB26_2: -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v16 ; CHECK-NEXT: bltu a0, a1, .LBB26_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: .LBB26_4: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: ret %v = call <32 x i64> @llvm.vp.fptosi.v32i64.v32f64(<32 x double> %va, <32 x i1> shufflevector (<32 x i1> insertelement (<32 x i1> undef, i1 true, i32 0), <32 x i1> undef, <32 x i32> zeroinitializer), i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp-mask.ll 
b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp-mask.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp-mask.ll @@ -21,7 +21,7 @@ define <4 x i1> @vfptoui_v4i1_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i1_v4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -46,7 +46,7 @@ define <4 x i1> @vfptoui_v4i1_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i1_v4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -72,7 +72,7 @@ define <4 x i1> @vfptoui_v4i1_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i1_v4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll @@ -33,7 +33,7 @@ define <4 x i8> @vfptoui_v4i8_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i8_v4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -56,7 +56,7 @@ define <4 x i16> @vfptoui_v4i16_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i16_v4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.vp.fptoui.v4i16.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) @@ -79,7 +79,7 @@ define <4 x i32> @vfptoui_v4i32_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i32_v4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -104,9 +104,9 @@ define <4 x i64> @vfptoui_v4i64_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i64_v4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v10 ; CHECK-NEXT: ret %v = call <4 x i64> @llvm.vp.fptoui.v4i64.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) @@ -130,9 +130,9 @@ define <4 x i8> @vfptoui_v4i8_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i8_v4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli 
zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v9, 0 ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.fptoui.v4i8.v4f32(<4 x float> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) @@ -155,7 +155,7 @@ define <4 x i16> @vfptoui_v4i16_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i16_v4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -178,7 +178,7 @@ define <4 x i32> @vfptoui_v4i32_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i32_v4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: ret %v = call <4 x i32> @llvm.vp.fptoui.v4i32.v4f32(<4 x float> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) @@ -201,7 +201,7 @@ define <4 x i64> @vfptoui_v4i64_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i64_v4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -228,11 +228,11 @@ define <4 x i8> @vfptoui_v4i8_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i8_v4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.fptoui.v4i8.v4f64(<4 x double> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) @@ -256,9 +256,9 @@ define <4 x i16> @vfptoui_v4i16_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i16_v4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.vp.fptoui.v4i16.v4f64(<4 x double> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) @@ -281,7 +281,7 @@ define <4 x i32> @vfptoui_v4i32_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i32_v4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -304,7 +304,7 @@ define <4 x i64> 
@vfptoui_v4i64_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_v4i64_v4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: ret %v = call <4 x i64> @llvm.vp.fptoui.v4i64.v4f64(<4 x double> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) @@ -318,7 +318,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: addi a2, a0, -16 ; CHECK-NEXT: vslidedown.vi v0, v0, 2 ; CHECK-NEXT: bltu a0, a2, .LBB25_2 @@ -349,14 +349,14 @@ ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a2, a1 ; CHECK-NEXT: .LBB26_2: -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: vfcvt.rtz.xu.f.v v16, v16 ; CHECK-NEXT: bltu a0, a1, .LBB26_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: .LBB26_4: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: ret %v = call <32 x i64> @llvm.vp.fptoui.v32i64.v32f64(<32 x double> %va, <32 x i1> shufflevector (<32 x i1> insertelement (<32 x i1> undef, i1 true, i32 0), <32 x i1> undef, <32 x i32> zeroinitializer), i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll @@ -7,7 +7,7 @@ define void @si2fp_v2i32_v2f32(<2 x i32>* %x, <2 x float>* %y) { ; CHECK-LABEL: si2fp_v2i32_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: vse32.v v8, (a1) @@ -21,7 +21,7 @@ define void @ui2fp_v2i32_v2f32(<2 x i32>* %x, <2 x float>* %y) { ; CHECK-LABEL: ui2fp_v2i32_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: vse32.v v8, (a1) @@ -35,7 +35,7 @@ define <2 x float> @si2fp_v2i1_v2f32(<2 x i1> %x) { ; CHECK-LABEL: si2fp_v2i1_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 @@ -47,10 +47,10 @@ define <2 x float> @si2fp_v2i7_v2f32(<2 x i7> %x) { ; CHECK-LABEL: si2fp_v2i7_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v8 ; CHECK-NEXT: vsra.vi v8, v8, 1 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vsext.vf4 v9, v8 ; CHECK-NEXT: vfcvt.f.x.v v8, v9 ; CHECK-NEXT: ret @@ -62,9 +62,9 @@ ; CHECK-LABEL: ui2fp_v2i7_v2f32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 127 -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vzext.vf4 v9, v8 ; 
CHECK-NEXT: vfcvt.f.xu.v v8, v9 ; CHECK-NEXT: ret @@ -75,7 +75,7 @@ define <2 x float> @ui2fp_v2i1_v2f32(<2 x i1> %x) { ; CHECK-LABEL: ui2fp_v2i1_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 @@ -87,7 +87,7 @@ define void @si2fp_v8i32_v8f32(<8 x i32>* %x, <8 x float>* %y) { ; LMULMAX8-LABEL: si2fp_v8i32_v8f32: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX8-NEXT: vle32.v v8, (a0) ; LMULMAX8-NEXT: vfcvt.f.x.v v8, v8 ; LMULMAX8-NEXT: vse32.v v8, (a1) @@ -95,7 +95,7 @@ ; ; LMULMAX1-LABEL: si2fp_v8i32_v8f32: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: addi a2, a0, 16 ; LMULMAX1-NEXT: vle32.v v8, (a2) ; LMULMAX1-NEXT: vle32.v v9, (a0) @@ -114,7 +114,7 @@ define void @ui2fp_v8i32_v8f32(<8 x i32>* %x, <8 x float>* %y) { ; LMULMAX8-LABEL: ui2fp_v8i32_v8f32: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX8-NEXT: vle32.v v8, (a0) ; LMULMAX8-NEXT: vfcvt.f.xu.v v8, v8 ; LMULMAX8-NEXT: vse32.v v8, (a1) @@ -122,7 +122,7 @@ ; ; LMULMAX1-LABEL: ui2fp_v8i32_v8f32: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: addi a2, a0, 16 ; LMULMAX1-NEXT: vle32.v v8, (a2) ; LMULMAX1-NEXT: vle32.v v9, (a0) @@ -141,7 +141,7 @@ define <8 x float> @si2fp_v8i1_v8f32(<8 x i1> %x) { ; LMULMAX8-LABEL: si2fp_v8i1_v8f32: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX8-NEXT: vmv.v.i v8, 0 ; LMULMAX8-NEXT: vmerge.vim v8, v8, -1, v0 ; LMULMAX8-NEXT: vfcvt.f.x.v v8, v8 @@ -149,18 +149,18 @@ ; ; LMULMAX1-LABEL: si2fp_v8i1_v8f32: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vmv.v.i v9, 0 ; LMULMAX1-NEXT: vmerge.vim v8, v9, -1, v0 ; LMULMAX1-NEXT: vfcvt.f.x.v v8, v8 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX1-NEXT: vmv.v.i v10, 0 ; LMULMAX1-NEXT: vmerge.vim v10, v10, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v10, v10, 4 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; LMULMAX1-NEXT: vmsne.vi v0, v10, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; LMULMAX1-NEXT: vmerge.vim v9, v9, -1, v0 ; LMULMAX1-NEXT: vfcvt.f.x.v v9, v9 ; LMULMAX1-NEXT: ret @@ -171,7 +171,7 @@ define <8 x float> @ui2fp_v8i1_v8f32(<8 x i1> %x) { ; LMULMAX8-LABEL: ui2fp_v8i1_v8f32: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX8-NEXT: vmv.v.i v8, 0 ; LMULMAX8-NEXT: vmerge.vim v8, v8, 1, v0 ; LMULMAX8-NEXT: vfcvt.f.xu.v v8, v8 @@ -179,18 +179,18 @@ ; ; LMULMAX1-LABEL: ui2fp_v8i1_v8f32: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vmv.v.i v9, 0 ; LMULMAX1-NEXT: vmerge.vim v8, v9, 1, 
v0 ; LMULMAX1-NEXT: vfcvt.f.xu.v v8, v8 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX1-NEXT: vmv.v.i v10, 0 ; LMULMAX1-NEXT: vmerge.vim v10, v10, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v10, v10, 4 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; LMULMAX1-NEXT: vmsne.vi v0, v10, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; LMULMAX1-NEXT: vmerge.vim v9, v9, 1, v0 ; LMULMAX1-NEXT: vfcvt.f.xu.v v9, v9 ; LMULMAX1-NEXT: ret @@ -201,7 +201,7 @@ define void @si2fp_v2i16_v2f64(<2 x i16>* %x, <2 x double>* %y) { ; CHECK-LABEL: si2fp_v2i16_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsext.vf4 v9, v8 ; CHECK-NEXT: vfcvt.f.x.v v8, v9 @@ -216,7 +216,7 @@ define void @ui2fp_v2i16_v2f64(<2 x i16>* %x, <2 x double>* %y) { ; CHECK-LABEL: ui2fp_v2i16_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vzext.vf4 v9, v8 ; CHECK-NEXT: vfcvt.f.xu.v v8, v9 @@ -231,7 +231,7 @@ define void @si2fp_v8i16_v8f64(<8 x i16>* %x, <8 x double>* %y) { ; LMULMAX8-LABEL: si2fp_v8i16_v8f64: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; LMULMAX8-NEXT: vle16.v v8, (a0) ; LMULMAX8-NEXT: vsext.vf4 v12, v8 ; LMULMAX8-NEXT: vfcvt.f.x.v v8, v12 @@ -240,18 +240,18 @@ ; ; LMULMAX1-LABEL: si2fp_v8i16_v8f64: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-NEXT: vle16.v v8, (a0) -; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v9, v8, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf4 v10, v9 ; LMULMAX1-NEXT: vfcvt.f.x.v v9, v10 -; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v10, v8, 4 -; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v11, v10, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf4 v12, v11 ; LMULMAX1-NEXT: vfcvt.f.x.v v11, v12 ; LMULMAX1-NEXT: vsext.vf4 v12, v10 @@ -275,7 +275,7 @@ define void @ui2fp_v8i16_v8f64(<8 x i16>* %x, <8 x double>* %y) { ; LMULMAX8-LABEL: ui2fp_v8i16_v8f64: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; LMULMAX8-NEXT: vle16.v v8, (a0) ; LMULMAX8-NEXT: vzext.vf4 v12, v8 ; LMULMAX8-NEXT: vfcvt.f.xu.v v8, v12 @@ -284,18 +284,18 @@ ; ; LMULMAX1-LABEL: ui2fp_v8i16_v8f64: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-NEXT: vle16.v v8, (a0) -; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v9, v8, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, 
ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vzext.vf4 v10, v9 ; LMULMAX1-NEXT: vfcvt.f.xu.v v9, v10 -; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v10, v8, 4 -; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v11, v10, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vzext.vf4 v12, v11 ; LMULMAX1-NEXT: vfcvt.f.xu.v v11, v12 ; LMULMAX1-NEXT: vzext.vf4 v12, v10 @@ -319,7 +319,7 @@ define <8 x double> @si2fp_v8i1_v8f64(<8 x i1> %x) { ; LMULMAX8-LABEL: si2fp_v8i1_v8f64: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; LMULMAX8-NEXT: vmv.v.i v8, 0 ; LMULMAX8-NEXT: vmerge.vim v8, v8, -1, v0 ; LMULMAX8-NEXT: vfcvt.f.x.v v8, v8 @@ -328,38 +328,38 @@ ; LMULMAX1-LABEL: si2fp_v8i1_v8f64: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: vmv1r.v v10, v0 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vmv.v.i v11, 0 ; LMULMAX1-NEXT: vmerge.vim v8, v11, -1, v0 ; LMULMAX1-NEXT: vfcvt.f.x.v v8, v8 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; LMULMAX1-NEXT: vmv.v.i v12, 0 ; LMULMAX1-NEXT: vmerge.vim v9, v12, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v9, v9, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; LMULMAX1-NEXT: vmsne.vi v0, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; LMULMAX1-NEXT: vmerge.vim v9, v11, -1, v0 ; LMULMAX1-NEXT: vfcvt.f.x.v v9, v9 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX1-NEXT: vmv.v.i v13, 0 ; LMULMAX1-NEXT: vmv1r.v v0, v10 ; LMULMAX1-NEXT: vmerge.vim v10, v13, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v10, v10, 4 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; LMULMAX1-NEXT: vmsne.vi v0, v10, 0 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vmerge.vim v10, v11, -1, v0 ; LMULMAX1-NEXT: vfcvt.f.x.v v10, v10 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; LMULMAX1-NEXT: vmerge.vim v12, v12, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v12, v12, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; LMULMAX1-NEXT: vmsne.vi v0, v12, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; LMULMAX1-NEXT: vmerge.vim v11, v11, -1, v0 ; LMULMAX1-NEXT: vfcvt.f.x.v v11, v11 ; LMULMAX1-NEXT: ret @@ -370,7 +370,7 @@ define <8 x double> @ui2fp_v8i1_v8f64(<8 x i1> %x) { ; LMULMAX8-LABEL: ui2fp_v8i1_v8f64: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; LMULMAX8-NEXT: 
vmv.v.i v8, 0 ; LMULMAX8-NEXT: vmerge.vim v8, v8, 1, v0 ; LMULMAX8-NEXT: vfcvt.f.xu.v v8, v8 @@ -379,38 +379,38 @@ ; LMULMAX1-LABEL: ui2fp_v8i1_v8f64: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: vmv1r.v v10, v0 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vmv.v.i v11, 0 ; LMULMAX1-NEXT: vmerge.vim v8, v11, 1, v0 ; LMULMAX1-NEXT: vfcvt.f.xu.v v8, v8 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; LMULMAX1-NEXT: vmv.v.i v12, 0 ; LMULMAX1-NEXT: vmerge.vim v9, v12, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v9, v9, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; LMULMAX1-NEXT: vmsne.vi v0, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; LMULMAX1-NEXT: vmerge.vim v9, v11, 1, v0 ; LMULMAX1-NEXT: vfcvt.f.xu.v v9, v9 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX1-NEXT: vmv.v.i v13, 0 ; LMULMAX1-NEXT: vmv1r.v v0, v10 ; LMULMAX1-NEXT: vmerge.vim v10, v13, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v10, v10, 4 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; LMULMAX1-NEXT: vmsne.vi v0, v10, 0 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vmerge.vim v10, v11, 1, v0 ; LMULMAX1-NEXT: vfcvt.f.xu.v v10, v10 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; LMULMAX1-NEXT: vmerge.vim v12, v12, 1, v0 -; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v12, v12, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; LMULMAX1-NEXT: vmsne.vi v0, v12, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; LMULMAX1-NEXT: vmerge.vim v11, v11, 1, v0 ; LMULMAX1-NEXT: vfcvt.f.xu.v v11, v11 ; LMULMAX1-NEXT: ret @@ -421,10 +421,10 @@ define void @si2fp_v2i64_v2f16(<2 x i64>* %x, <2 x half>* %y) { ; CHECK-LABEL: si2fp_v2i64_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfncvt.f.x.w v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v8, v9 ; CHECK-NEXT: vse16.v v8, (a1) ; CHECK-NEXT: ret @@ -437,10 +437,10 @@ define void @ui2fp_v2i64_v2f16(<2 x i64>* %x, <2 x half>* %y) { ; CHECK-LABEL: ui2fp_v2i64_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfncvt.f.xu.w v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v8, v9 ; CHECK-NEXT: vse16.v v8, (a1) ; CHECK-NEXT: ret @@ -453,7 +453,7 @@ define <2 x half> @si2fp_v2i1_v2f16(<2 x i1> %x) { ; CHECK-LABEL: si2fp_v2i1_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, 
ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 @@ -465,7 +465,7 @@ define <2 x half> @ui2fp_v2i1_v2f16(<2 x i1> %x) { ; CHECK-LABEL: ui2fp_v2i1_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 @@ -477,10 +477,10 @@ define void @si2fp_v8i64_v8f16(<8 x i64>* %x, <8 x half>* %y) { ; LMULMAX8-LABEL: si2fp_v8i64_v8f16: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX8-NEXT: vle64.v v8, (a0) ; LMULMAX8-NEXT: vfncvt.f.x.w v12, v8 -; LMULMAX8-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; LMULMAX8-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; LMULMAX8-NEXT: vfncvt.f.f.w v8, v12 ; LMULMAX8-NEXT: vse16.v v8, (a1) ; LMULMAX8-NEXT: ret @@ -488,7 +488,7 @@ ; LMULMAX1-LABEL: si2fp_v8i64_v8f16: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: addi a2, a0, 48 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vle64.v v8, (a2) ; LMULMAX1-NEXT: addi a2, a0, 32 ; LMULMAX1-NEXT: vle64.v v9, (a0) @@ -496,25 +496,25 @@ ; LMULMAX1-NEXT: addi a0, a0, 16 ; LMULMAX1-NEXT: vle64.v v11, (a0) ; LMULMAX1-NEXT: vfncvt.f.x.w v12, v9 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vfncvt.f.f.w v9, v12 -; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; LMULMAX1-NEXT: vfncvt.f.x.w v12, v11 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vfncvt.f.f.w v11, v12 -; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v9, v11, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vfncvt.f.x.w v11, v10 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vfncvt.f.f.w v10, v11 -; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v9, v10, 4 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vfncvt.f.x.w v10, v8 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vfncvt.f.f.w v8, v10 -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v9, v8, 6 ; LMULMAX1-NEXT: vse16.v v9, (a1) ; LMULMAX1-NEXT: ret @@ -527,10 +527,10 @@ define void @ui2fp_v8i64_v8f16(<8 x i64>* %x, <8 x half>* %y) { ; LMULMAX8-LABEL: ui2fp_v8i64_v8f16: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX8-NEXT: vle64.v v8, (a0) ; LMULMAX8-NEXT: vfncvt.f.xu.w v12, v8 -; LMULMAX8-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; LMULMAX8-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; LMULMAX8-NEXT: vfncvt.f.f.w v8, v12 ; LMULMAX8-NEXT: vse16.v v8, (a1) ; LMULMAX8-NEXT: ret @@ -538,7 +538,7 @@ ; LMULMAX1-LABEL: ui2fp_v8i64_v8f16: ; 
LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: addi a2, a0, 48 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vle64.v v8, (a2) ; LMULMAX1-NEXT: addi a2, a0, 32 ; LMULMAX1-NEXT: vle64.v v9, (a0) @@ -546,25 +546,25 @@ ; LMULMAX1-NEXT: addi a0, a0, 16 ; LMULMAX1-NEXT: vle64.v v11, (a0) ; LMULMAX1-NEXT: vfncvt.f.xu.w v12, v9 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vfncvt.f.f.w v9, v12 -; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; LMULMAX1-NEXT: vfncvt.f.xu.w v12, v11 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vfncvt.f.f.w v11, v12 -; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v9, v11, 2 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vfncvt.f.xu.w v11, v10 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vfncvt.f.f.w v10, v11 -; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v9, v10, 4 -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vfncvt.f.xu.w v10, v8 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; LMULMAX1-NEXT: vfncvt.f.f.w v8, v10 -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v9, v8, 6 ; LMULMAX1-NEXT: vse16.v v9, (a1) ; LMULMAX1-NEXT: ret @@ -577,7 +577,7 @@ define <8 x half> @si2fp_v8i1_v8f16(<8 x i1> %x) { ; CHECK-LABEL: si2fp_v8i1_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 @@ -589,7 +589,7 @@ define <8 x half> @ui2fp_v8i1_v8f16(<8 x i1> %x) { ; CHECK-LABEL: ui2fp_v8i1_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll @@ -8,7 +8,7 @@ ; CHECK-LABEL: insertelt_v1i1: ; CHECK: # %bb.0: ; CHECK-NEXT: andi a0, a0, 1 -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -19,29 +19,29 @@ define <1 x i1> @insertelt_idx_v1i1(<1 x i1> %x, i1 %elt, i32 zeroext %idx) nounwind { ; RV32-LABEL: insertelt_idx_v1i1: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; RV32-NEXT: vmv.s.x v8, a0 ; RV32-NEXT: vmv.v.i v9, 0 ; RV32-NEXT: vmerge.vim v9, v9, 1, v0 ; RV32-NEXT: addi a0, a1, 1 -; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; RV32-NEXT: 
vslideup.vx v9, v8, a1 -; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; RV32-NEXT: vand.vi v8, v9, 1 ; RV32-NEXT: vmsne.vi v0, v8, 0 ; RV32-NEXT: ret ; ; RV64-LABEL: insertelt_idx_v1i1: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; RV64-NEXT: vmv.s.x v8, a0 ; RV64-NEXT: vmv.v.i v9, 0 ; RV64-NEXT: vmerge.vim v9, v9, 1, v0 ; RV64-NEXT: sext.w a0, a1 ; RV64-NEXT: addi a1, a0, 1 -; RV64-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; RV64-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; RV64-NEXT: vslideup.vx v9, v8, a0 -; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; RV64-NEXT: vand.vi v8, v9, 1 ; RV64-NEXT: vmsne.vi v0, v8, 0 ; RV64-NEXT: ret @@ -52,13 +52,13 @@ define <2 x i1> @insertelt_v2i1(<2 x i1> %x, i1 %elt) nounwind { ; CHECK-LABEL: insertelt_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 1 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -69,29 +69,29 @@ define <2 x i1> @insertelt_idx_v2i1(<2 x i1> %x, i1 %elt, i32 zeroext %idx) nounwind { ; RV32-LABEL: insertelt_idx_v2i1: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; RV32-NEXT: vmv.s.x v8, a0 ; RV32-NEXT: vmv.v.i v9, 0 ; RV32-NEXT: vmerge.vim v9, v9, 1, v0 ; RV32-NEXT: addi a0, a1, 1 -; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; RV32-NEXT: vslideup.vx v9, v8, a1 -; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; RV32-NEXT: vand.vi v8, v9, 1 ; RV32-NEXT: vmsne.vi v0, v8, 0 ; RV32-NEXT: ret ; ; RV64-LABEL: insertelt_idx_v2i1: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; RV64-NEXT: vmv.s.x v8, a0 ; RV64-NEXT: vmv.v.i v9, 0 ; RV64-NEXT: vmerge.vim v9, v9, 1, v0 ; RV64-NEXT: sext.w a0, a1 ; RV64-NEXT: addi a1, a0, 1 -; RV64-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; RV64-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; RV64-NEXT: vslideup.vx v9, v8, a0 -; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; RV64-NEXT: vand.vi v8, v9, 1 ; RV64-NEXT: vmsne.vi v0, v8, 0 ; RV64-NEXT: ret @@ -102,13 +102,13 @@ define <8 x i1> @insertelt_v8i1(<8 x i1> %x, i1 %elt) nounwind { ; CHECK-LABEL: insertelt_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 1 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -119,29 +119,29 @@ define <8 x i1> @insertelt_idx_v8i1(<8 x i1> %x, i1 %elt, i32 zeroext %idx) nounwind { ; RV32-LABEL: insertelt_idx_v8i1: ; RV32: # %bb.0: -; 
RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV32-NEXT: vmv.s.x v8, a0 ; RV32-NEXT: vmv.v.i v9, 0 ; RV32-NEXT: vmerge.vim v9, v9, 1, v0 ; RV32-NEXT: addi a0, a1, 1 -; RV32-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; RV32-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; RV32-NEXT: vslideup.vx v9, v8, a1 -; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV32-NEXT: vand.vi v8, v9, 1 ; RV32-NEXT: vmsne.vi v0, v8, 0 ; RV32-NEXT: ret ; ; RV64-LABEL: insertelt_idx_v8i1: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64-NEXT: vmv.s.x v8, a0 ; RV64-NEXT: vmv.v.i v9, 0 ; RV64-NEXT: vmerge.vim v9, v9, 1, v0 ; RV64-NEXT: sext.w a0, a1 ; RV64-NEXT: addi a1, a0, 1 -; RV64-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; RV64-NEXT: vsetvli zero, a1, e8, mf2, tu, ma ; RV64-NEXT: vslideup.vx v9, v8, a0 -; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64-NEXT: vand.vi v8, v9, 1 ; RV64-NEXT: vmsne.vi v0, v8, 0 ; RV64-NEXT: ret @@ -153,13 +153,13 @@ ; CHECK-LABEL: insertelt_v64i1: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v12, 0 ; CHECK-NEXT: vmerge.vim v12, v12, 1, v0 -; CHECK-NEXT: vsetivli zero, 2, e8, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, m4, tu, ma ; CHECK-NEXT: vslideup.vi v12, v8, 1 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v12, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -171,14 +171,14 @@ ; RV32-LABEL: insertelt_idx_v64i1: ; RV32: # %bb.0: ; RV32-NEXT: li a2, 64 -; RV32-NEXT: vsetvli zero, a2, e8, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e8, m4, ta, ma ; RV32-NEXT: vmv.s.x v8, a0 ; RV32-NEXT: vmv.v.i v12, 0 ; RV32-NEXT: vmerge.vim v12, v12, 1, v0 ; RV32-NEXT: addi a0, a1, 1 -; RV32-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; RV32-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; RV32-NEXT: vslideup.vx v12, v8, a1 -; RV32-NEXT: vsetvli zero, a2, e8, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e8, m4, ta, ma ; RV32-NEXT: vand.vi v8, v12, 1 ; RV32-NEXT: vmsne.vi v0, v8, 0 ; RV32-NEXT: ret @@ -186,15 +186,15 @@ ; RV64-LABEL: insertelt_idx_v64i1: ; RV64: # %bb.0: ; RV64-NEXT: li a2, 64 -; RV64-NEXT: vsetvli zero, a2, e8, m4, ta, mu +; RV64-NEXT: vsetvli zero, a2, e8, m4, ta, ma ; RV64-NEXT: vmv.s.x v8, a0 ; RV64-NEXT: vmv.v.i v12, 0 ; RV64-NEXT: vmerge.vim v12, v12, 1, v0 ; RV64-NEXT: sext.w a0, a1 ; RV64-NEXT: addi a1, a0, 1 -; RV64-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; RV64-NEXT: vsetvli zero, a1, e8, m4, tu, ma ; RV64-NEXT: vslideup.vx v12, v8, a0 -; RV64-NEXT: vsetvli zero, a2, e8, m4, ta, mu +; RV64-NEXT: vsetvli zero, a2, e8, m4, ta, ma ; RV64-NEXT: vand.vi v8, v12, 1 ; RV64-NEXT: vmsne.vi v0, v8, 0 ; RV64-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll @@ -12,9 +12,9 @@ define @insert_nxv8i32_v2i32_0( %vec, <2 x i32>* %svp) { ; CHECK-LABEL: insert_nxv8i32_v2i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v12, (a0) -; CHECK-NEXT: 
vsetivli zero, 2, e32, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e32, m4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v12, 0 ; CHECK-NEXT: ret %sv = load <2 x i32>, <2 x i32>* %svp @@ -25,9 +25,9 @@ define @insert_nxv8i32_v2i32_2( %vec, <2 x i32>* %svp) { ; CHECK-LABEL: insert_nxv8i32_v2i32_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v12, (a0) -; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v12, 2 ; CHECK-NEXT: ret %sv = load <2 x i32>, <2 x i32>* %svp @@ -38,9 +38,9 @@ define @insert_nxv8i32_v2i32_6( %vec, <2 x i32>* %svp) { ; CHECK-LABEL: insert_nxv8i32_v2i32_6: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v12, (a0) -; CHECK-NEXT: vsetivli zero, 8, e32, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v12, 6 ; CHECK-NEXT: ret %sv = load <2 x i32>, <2 x i32>* %svp @@ -51,21 +51,21 @@ define @insert_nxv8i32_v8i32_0( %vec, <8 x i32>* %svp) { ; LMULMAX2-LABEL: insert_nxv8i32_v8i32_0: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v12, (a0) -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m4, tu, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m4, tu, ma ; LMULMAX2-NEXT: vslideup.vi v8, v12, 0 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: insert_nxv8i32_v8i32_0: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vle32.v v12, (a0) ; LMULMAX1-NEXT: addi a0, a0, 16 ; LMULMAX1-NEXT: vle32.v v16, (a0) -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m4, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m4, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v12, 0 -; LMULMAX1-NEXT: vsetivli zero, 8, e32, m4, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e32, m4, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v16, 4 ; LMULMAX1-NEXT: ret %sv = load <8 x i32>, <8 x i32>* %svp @@ -76,21 +76,21 @@ define @insert_nxv8i32_v8i32_8( %vec, <8 x i32>* %svp) { ; LMULMAX2-LABEL: insert_nxv8i32_v8i32_8: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v12, (a0) -; LMULMAX2-NEXT: vsetivli zero, 16, e32, m4, tu, mu +; LMULMAX2-NEXT: vsetivli zero, 16, e32, m4, tu, ma ; LMULMAX2-NEXT: vslideup.vi v8, v12, 8 ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: insert_nxv8i32_v8i32_8: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vle32.v v12, (a0) ; LMULMAX1-NEXT: addi a0, a0, 16 ; LMULMAX1-NEXT: vle32.v v16, (a0) -; LMULMAX1-NEXT: vsetivli zero, 12, e32, m4, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 12, e32, m4, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v12, 8 -; LMULMAX1-NEXT: vsetivli zero, 16, e32, m4, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e32, m4, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v16, 12 ; LMULMAX1-NEXT: ret %sv = load <8 x i32>, <8 x i32>* %svp @@ -101,7 +101,7 @@ define @insert_nxv8i32_undef_v2i32_0(<2 x i32>* %svp) { ; CHECK-LABEL: insert_nxv8i32_undef_v2i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: ret %sv = load <2 x i32>, <2 x i32>* %svp @@ 
-112,13 +112,13 @@ define void @insert_v4i32_v2i32_0(<4 x i32>* %vp, <2 x i32>* %svp) { ; CHECK-LABEL: insert_v4i32_v2i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a1) -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v9, (a0) -; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v9, (a0) ; CHECK-NEXT: ret %sv = load <2 x i32>, <2 x i32>* %svp @@ -131,11 +131,11 @@ define void @insert_v4i32_v2i32_2(<4 x i32>* %vp, <2 x i32>* %svp) { ; CHECK-LABEL: insert_v4i32_v2i32_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a1) -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v9, (a0) -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 2 ; CHECK-NEXT: vse32.v v9, (a0) ; CHECK-NEXT: ret @@ -149,9 +149,9 @@ define void @insert_v4i32_undef_v2i32_0(<4 x i32>* %vp, <2 x i32>* %svp) { ; CHECK-LABEL: insert_v4i32_undef_v2i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a1) -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret %sv = load <2 x i32>, <2 x i32>* %svp @@ -163,25 +163,25 @@ define void @insert_v8i32_v2i32_0(<8 x i32>* %vp, <2 x i32>* %svp) { ; LMULMAX2-LABEL: insert_v8i32_v2i32_0: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX2-NEXT: vle32.v v8, (a1) -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v10, (a0) -; LMULMAX2-NEXT: vsetivli zero, 2, e32, m2, tu, mu +; LMULMAX2-NEXT: vsetivli zero, 2, e32, m2, tu, ma ; LMULMAX2-NEXT: vslideup.vi v10, v8, 0 -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vse32.v v10, (a0) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: insert_v8i32_v2i32_0: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vle32.v v8, (a1) -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vle32.v v9, (a0) -; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v9, v8, 0 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vse32.v v9, (a0) ; LMULMAX1-NEXT: ret %sv = load <2 x i32>, <2 x i32>* %svp @@ -194,23 +194,23 @@ define void @insert_v8i32_v2i32_2(<8 x i32>* %vp, <2 x i32>* %svp) { ; LMULMAX2-LABEL: insert_v8i32_v2i32_2: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX2-NEXT: vle32.v v8, (a1) -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; 
LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v10, (a0) -; LMULMAX2-NEXT: vsetivli zero, 4, e32, m2, tu, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; LMULMAX2-NEXT: vslideup.vi v10, v8, 2 -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vse32.v v10, (a0) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: insert_v8i32_v2i32_2: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vle32.v v8, (a1) -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vle32.v v9, (a0) -; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, tu, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v9, v8, 2 ; LMULMAX1-NEXT: vse32.v v9, (a0) ; LMULMAX1-NEXT: ret @@ -224,23 +224,23 @@ define void @insert_v8i32_v2i32_6(<8 x i32>* %vp, <2 x i32>* %svp) { ; LMULMAX2-LABEL: insert_v8i32_v2i32_6: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX2-NEXT: vle32.v v8, (a1) -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v10, (a0) -; LMULMAX2-NEXT: vsetvli zero, zero, e32, m2, tu, mu +; LMULMAX2-NEXT: vsetvli zero, zero, e32, m2, tu, ma ; LMULMAX2-NEXT: vslideup.vi v10, v8, 6 ; LMULMAX2-NEXT: vse32.v v10, (a0) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: insert_v8i32_v2i32_6: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vle32.v v8, (a1) ; LMULMAX1-NEXT: addi a0, a0, 16 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vle32.v v9, (a0) -; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, tu, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e32, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v9, v8, 2 ; LMULMAX1-NEXT: vse32.v v9, (a0) ; LMULMAX1-NEXT: ret @@ -254,18 +254,18 @@ define void @insert_v8i32_undef_v2i32_6(<8 x i32>* %vp, <2 x i32>* %svp) { ; LMULMAX2-LABEL: insert_v8i32_undef_v2i32_6: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX2-NEXT: vle32.v v8, (a1) -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, tu, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; LMULMAX2-NEXT: vslideup.vi v10, v8, 6 ; LMULMAX2-NEXT: vse32.v v10, (a0) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: insert_v8i32_undef_v2i32_6: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; LMULMAX1-NEXT: vle32.v v8, (a1) -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, ma ; LMULMAX1-NEXT: vslideup.vi v9, v8, 2 ; LMULMAX1-NEXT: addi a0, a0, 16 ; LMULMAX1-NEXT: vse32.v v9, (a0) @@ -279,13 +279,13 @@ define void @insert_v4i16_v2i16_0(<4 x i16>* %vp, <2 x i16>* %svp) { ; CHECK-LABEL: insert_v4i16_v2i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v9, (a1) -; CHECK-NEXT: vsetivli zero, 2, e16, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 
2, e16, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret %v = load <4 x i16>, <4 x i16>* %vp @@ -298,11 +298,11 @@ define void @insert_v4i16_v2i16_2(<4 x i16>* %vp, <2 x i16>* %svp) { ; CHECK-LABEL: insert_v4i16_v2i16_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v9, (a1) -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 2 ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret @@ -317,25 +317,25 @@ ; LMULMAX2-LABEL: insert_v32i1_v8i1_0: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: li a2, 32 -; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; LMULMAX2-NEXT: vlm.v v8, (a0) -; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX2-NEXT: vlm.v v9, (a1) -; LMULMAX2-NEXT: vsetivli zero, 1, e8, mf4, tu, mu +; LMULMAX2-NEXT: vsetivli zero, 1, e8, mf4, tu, ma ; LMULMAX2-NEXT: vslideup.vi v8, v9, 0 -; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; LMULMAX2-NEXT: vsm.v v8, (a0) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: insert_v32i1_v8i1_0: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: vlm.v v8, (a0) -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX1-NEXT: vlm.v v9, (a1) -; LMULMAX1-NEXT: vsetivli zero, 1, e8, mf8, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 1, e8, mf8, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v9, 0 -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: vsm.v v8, (a0) ; LMULMAX1-NEXT: ret %v = load <32 x i1>, <32 x i1>* %vp @@ -349,26 +349,26 @@ ; LMULMAX2-LABEL: insert_v32i1_v8i1_16: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: li a2, 32 -; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; LMULMAX2-NEXT: vlm.v v8, (a0) -; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX2-NEXT: vlm.v v9, (a1) -; LMULMAX2-NEXT: vsetivli zero, 3, e8, mf4, tu, mu +; LMULMAX2-NEXT: vsetivli zero, 3, e8, mf4, tu, ma ; LMULMAX2-NEXT: vslideup.vi v8, v9, 2 -; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; LMULMAX2-NEXT: vsm.v v8, (a0) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: insert_v32i1_v8i1_16: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: addi a0, a0, 2 -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: vlm.v v8, (a0) -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX1-NEXT: vlm.v v9, (a1) -; LMULMAX1-NEXT: vsetivli zero, 1, e8, mf8, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 1, e8, mf8, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v9, 0 -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: vsm.v v8, (a0) ; LMULMAX1-NEXT: ret %v = load <32 x i1>, <32 x 
i1>* %vp @@ -381,20 +381,20 @@ define void @insert_v8i1_v4i1_0(<8 x i1>* %vp, <4 x i1>* %svp) { ; CHECK-LABEL: insert_v8i1_v4i1_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vlm.v v0, (a0) -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vlm.v v8, (a1) -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmerge.vim v8, v10, 1, v0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a0) ; CHECK-NEXT: ret @@ -408,20 +408,20 @@ define void @insert_v8i1_v4i1_4(<8 x i1>* %vp, <4 x i1>* %svp) { ; CHECK-LABEL: insert_v8i1_v4i1_4: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vlm.v v0, (a0) -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vlm.v v8, (a1) -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmerge.vim v8, v10, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 4 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a0) ; CHECK-NEXT: ret @@ -435,9 +435,9 @@ define @insert_nxv2i16_v2i16_0( %v, <2 x i16>* %svp) { ; CHECK-LABEL: insert_nxv2i16_v2i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) -; CHECK-NEXT: vsetivli zero, 2, e16, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 0 ; CHECK-NEXT: ret %sv = load <2 x i16>, <2 x i16>* %svp @@ -448,9 +448,9 @@ define @insert_nxv2i16_v2i16_2( %v, <2 x i16>* %svp) { ; CHECK-LABEL: insert_nxv2i16_v2i16_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) -; CHECK-NEXT: vsetivli zero, 6, e16, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 6, e16, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 4 ; CHECK-NEXT: ret %sv = load <2 x i16>, <2 x i16>* %svp @@ -461,18 +461,18 @@ define @insert_nxv2i1_v4i1_0( %v, <4 x i1>* %svp) { ; CHECK-LABEL: insert_nxv2i1_v4i1_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vlm.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli 
zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmerge.vim v8, v10, 1, v0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmsne.vi v0, v9, 0 ; CHECK-NEXT: ret %sv = load <4 x i1>, <4 x i1>* %svp @@ -483,9 +483,9 @@ define @insert_nxv8i1_v4i1_0( %v, <8 x i1>* %svp) { ; CHECK-LABEL: insert_nxv8i1_v4i1_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vlm.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, tu, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, tu, ma ; CHECK-NEXT: vslideup.vi v0, v8, 0 ; CHECK-NEXT: ret %sv = load <8 x i1>, <8 x i1>* %svp @@ -496,9 +496,9 @@ define @insert_nxv8i1_v8i1_16( %v, <8 x i1>* %svp) { ; CHECK-LABEL: insert_nxv8i1_v8i1_16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vlm.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 3, e8, mf8, tu, mu +; CHECK-NEXT: vsetivli zero, 3, e8, mf8, tu, ma ; CHECK-NEXT: vslideup.vi v0, v8, 2 ; CHECK-NEXT: ret %sv = load <8 x i1>, <8 x i1>* %svp @@ -511,10 +511,10 @@ define void @insert_v2i64_nxv16i64(<2 x i64>* %psv0, <2 x i64>* %psv1, * %out) { ; CHECK-LABEL: insert_v2i64_nxv16i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v16, (a1) -; CHECK-NEXT: vsetivli zero, 6, e64, m8, tu, mu +; CHECK-NEXT: vsetivli zero, 6, e64, m8, tu, ma ; CHECK-NEXT: vslideup.vi v8, v16, 4 ; CHECK-NEXT: vs8r.v v8, (a2) ; CHECK-NEXT: ret @@ -529,7 +529,7 @@ define void @insert_v2i64_nxv16i64_lo0(<2 x i64>* %psv, * %out) { ; CHECK-LABEL: insert_v2i64_nxv16i64_lo0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vs8r.v v8, (a1) ; CHECK-NEXT: ret @@ -542,9 +542,9 @@ define void @insert_v2i64_nxv16i64_lo2(<2 x i64>* %psv, * %out) { ; CHECK-LABEL: insert_v2i64_nxv16i64_lo2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 4, e64, m8, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m8, tu, ma ; CHECK-NEXT: vslideup.vi v16, v8, 2 ; CHECK-NEXT: vs8r.v v16, (a1) ; CHECK-NEXT: ret @@ -567,7 +567,7 @@ ; CHECK-NEXT: slli a2, a2, 4 ; CHECK-NEXT: sub sp, sp, a2 ; CHECK-NEXT: andi sp, sp, -64 -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: addi a0, sp, 128 ; CHECK-NEXT: vse64.v v8, (a0) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll @@ -8,23 +8,23 @@ define void @insertelt_v4i64(<4 x i64>* %x, i64 %y) { ; RV32-LABEL: insertelt_v4i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: vsetivli zero, 2, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, m2, ta, ma ; RV32-NEXT: vmv.v.i v10, 0 ; RV32-NEXT: vslide1up.vx v12, 
v10, a2 ; RV32-NEXT: vslide1up.vx v10, v12, a1 -; RV32-NEXT: vsetivli zero, 4, e64, m2, tu, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, tu, ma ; RV32-NEXT: vslideup.vi v8, v10, 3 ; RV32-NEXT: vse64.v v8, (a0) ; RV32-NEXT: ret ; ; RV64-LABEL: insertelt_v4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.s.x v10, a1 -; RV64-NEXT: vsetvli zero, zero, e64, m2, tu, mu +; RV64-NEXT: vsetvli zero, zero, e64, m2, tu, ma ; RV64-NEXT: vslideup.vi v8, v10, 3 ; RV64-NEXT: vse64.v v8, (a0) ; RV64-NEXT: ret @@ -41,25 +41,25 @@ define void @insertelt_v3i64(<3 x i64>* %x, i64 %y) { ; RV32-LABEL: insertelt_v3i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: lw a3, 16(a0) ; RV32-NEXT: addi a4, a0, 20 -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vlse32.v v10, (a4), zero -; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu +; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, ma ; RV32-NEXT: vmv.s.x v10, a3 -; RV32-NEXT: vsetvli zero, zero, e64, m2, tu, mu +; RV32-NEXT: vsetvli zero, zero, e64, m2, tu, ma ; RV32-NEXT: vslideup.vi v8, v10, 2 -; RV32-NEXT: vsetivli zero, 2, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, m2, ta, ma ; RV32-NEXT: vmv.v.i v10, 0 ; RV32-NEXT: vslide1up.vx v12, v10, a2 ; RV32-NEXT: vslide1up.vx v10, v12, a1 -; RV32-NEXT: vsetivli zero, 3, e64, m2, tu, mu +; RV32-NEXT: vsetivli zero, 3, e64, m2, tu, ma ; RV32-NEXT: vslideup.vi v8, v10, 2 ; RV32-NEXT: sw a1, 16(a0) ; RV32-NEXT: sw a2, 20(a0) -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vse64.v v8, (a0) ; RV32-NEXT: ret ; @@ -76,12 +76,12 @@ define void @insertelt_v16i8(<16 x i8>* %x, i8 %y) { ; CHECK-LABEL: insertelt_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, a1 -; CHECK-NEXT: vsetivli zero, 15, e8, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 15, e8, m1, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 14 -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret %a = load <16 x i8>, <16 x i8>* %x @@ -94,27 +94,27 @@ ; RV32-LABEL: insertelt_v32i16: ; RV32: # %bb.0: ; RV32-NEXT: li a3, 32 -; RV32-NEXT: vsetvli zero, a3, e16, m4, ta, mu +; RV32-NEXT: vsetvli zero, a3, e16, m4, ta, ma ; RV32-NEXT: vle16.v v8, (a0) ; RV32-NEXT: vmv.s.x v12, a1 ; RV32-NEXT: addi a1, a2, 1 -; RV32-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; RV32-NEXT: vsetvli zero, a1, e16, m4, tu, ma ; RV32-NEXT: vslideup.vx v8, v12, a2 -; RV32-NEXT: vsetvli zero, a3, e16, m4, ta, mu +; RV32-NEXT: vsetvli zero, a3, e16, m4, ta, ma ; RV32-NEXT: vse16.v v8, (a0) ; RV32-NEXT: ret ; ; RV64-LABEL: insertelt_v32i16: ; RV64: # %bb.0: ; RV64-NEXT: li a3, 32 -; RV64-NEXT: vsetvli zero, a3, e16, m4, ta, mu +; RV64-NEXT: vsetvli zero, a3, e16, m4, ta, ma ; RV64-NEXT: vle16.v v8, (a0) ; RV64-NEXT: vmv.s.x v12, a1 ; RV64-NEXT: sext.w a1, a2 ; RV64-NEXT: addi a2, a1, 1 -; RV64-NEXT: vsetvli zero, a2, e16, m4, tu, mu +; RV64-NEXT: vsetvli zero, a2, e16, m4, tu, ma ; RV64-NEXT: vslideup.vx v8, v12, a1 -; RV64-NEXT: vsetvli zero, a3, e16, m4, ta, mu +; RV64-NEXT: vsetvli zero, a3, e16, m4, ta, ma ; RV64-NEXT: vse16.v v8, (a0) ; RV64-NEXT: 
ret %a = load <32 x i16>, <32 x i16>* %x @@ -126,26 +126,26 @@ define void @insertelt_v8f32(<8 x float>* %x, float %y, i32 %idx) { ; RV32-LABEL: insertelt_v8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vle32.v v8, (a0) ; RV32-NEXT: vfmv.s.f v10, fa0 ; RV32-NEXT: addi a2, a1, 1 -; RV32-NEXT: vsetvli zero, a2, e32, m2, tu, mu +; RV32-NEXT: vsetvli zero, a2, e32, m2, tu, ma ; RV32-NEXT: vslideup.vx v8, v10, a1 -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vse32.v v8, (a0) ; RV32-NEXT: ret ; ; RV64-LABEL: insertelt_v8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64-NEXT: vle32.v v8, (a0) ; RV64-NEXT: vfmv.s.f v10, fa0 ; RV64-NEXT: sext.w a1, a1 ; RV64-NEXT: addi a2, a1, 1 -; RV64-NEXT: vsetvli zero, a2, e32, m2, tu, mu +; RV64-NEXT: vsetvli zero, a2, e32, m2, tu, ma ; RV64-NEXT: vslideup.vx v8, v10, a1 -; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64-NEXT: vse32.v v8, (a0) ; RV64-NEXT: ret %a = load <8 x float>, <8 x float>* %x @@ -157,10 +157,10 @@ define void @insertelt_v8i64_0(<8 x i64>* %x) { ; CHECK-LABEL: insertelt_v8i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: li a1, -1 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, ma ; CHECK-NEXT: vmv.s.x v8, a1 ; CHECK-NEXT: vse64.v v8, (a0) ; CHECK-NEXT: ret @@ -173,28 +173,28 @@ define void @insertelt_v8i64(<8 x i64>* %x, i32 %idx) { ; RV32-LABEL: insertelt_v8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: li a2, -1 ; RV32-NEXT: vmv.s.x v12, a2 ; RV32-NEXT: addi a2, a1, 1 -; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, ma ; RV32-NEXT: vslideup.vx v8, v12, a1 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vse64.v v8, (a0) ; RV32-NEXT: ret ; ; RV64-LABEL: insertelt_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: li a2, -1 ; RV64-NEXT: vmv.s.x v12, a2 ; RV64-NEXT: sext.w a1, a1 ; RV64-NEXT: addi a2, a1, 1 -; RV64-NEXT: vsetvli zero, a2, e64, m4, tu, mu +; RV64-NEXT: vsetvli zero, a2, e64, m4, tu, ma ; RV64-NEXT: vslideup.vx v8, v12, a1 -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vse64.v v8, (a0) ; RV64-NEXT: ret %a = load <8 x i64>, <8 x i64>* %x @@ -206,10 +206,10 @@ define void @insertelt_c6_v8i64_0(<8 x i64>* %x) { ; CHECK-LABEL: insertelt_c6_v8i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: li a1, 6 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, ma ; CHECK-NEXT: vmv.s.x v8, a1 ; CHECK-NEXT: vse64.v v8, (a0) ; CHECK-NEXT: ret @@ -222,28 +222,28 @@ define void @insertelt_c6_v8i64(<8 x i64>* %x, i32 %idx) { ; RV32-LABEL: insertelt_c6_v8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, 
ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: li a2, 6 ; RV32-NEXT: vmv.s.x v12, a2 ; RV32-NEXT: addi a2, a1, 1 -; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, ma ; RV32-NEXT: vslideup.vx v8, v12, a1 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vse64.v v8, (a0) ; RV32-NEXT: ret ; ; RV64-LABEL: insertelt_c6_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: li a2, 6 ; RV64-NEXT: vmv.s.x v12, a2 ; RV64-NEXT: sext.w a1, a1 ; RV64-NEXT: addi a2, a1, 1 -; RV64-NEXT: vsetvli zero, a2, e64, m4, tu, mu +; RV64-NEXT: vsetvli zero, a2, e64, m4, tu, ma ; RV64-NEXT: vslideup.vx v8, v12, a1 -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vse64.v v8, (a0) ; RV64-NEXT: ret %a = load <8 x i64>, <8 x i64>* %x @@ -257,12 +257,12 @@ define void @insertelt_c6_v8i64_0_add(<8 x i64>* %x, <8 x i64>* %y) { ; CHECK-LABEL: insertelt_c6_v8i64_0_add: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: li a2, 6 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, ma ; CHECK-NEXT: vmv.s.x v8, a2 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; CHECK-NEXT: vle64.v v12, (a1) ; CHECK-NEXT: vadd.vv v8, v8, v12 ; CHECK-NEXT: vse64.v v8, (a0) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll @@ -5,7 +5,7 @@ define void @buildvec_vid_v16i8(<16 x i8>* %x) { ; CHECK-LABEL: buildvec_vid_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret @@ -16,7 +16,7 @@ define void @buildvec_vid_undefelts_v16i8(<16 x i8>* %x) { ; CHECK-LABEL: buildvec_vid_undefelts_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret @@ -30,7 +30,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI2_0) ; CHECK-NEXT: addi a1, a1, %lo(.LCPI2_0) -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a1) ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret @@ -41,7 +41,7 @@ define void @buildvec_vid_plus_imm_v16i8(<16 x i8>* %x) { ; CHECK-LABEL: buildvec_vid_plus_imm_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vadd.vi v8, v8, 2 ; CHECK-NEXT: vse8.v v8, (a0) @@ -53,7 +53,7 @@ define void @buildvec_vid_mpy_imm_v16i8(<16 x i8>* %x) { ; CHECK-LABEL: buildvec_vid_mpy_imm_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: li a1, 3 ; CHECK-NEXT: vmul.vx v8, v8, a1 @@ -66,7 +66,7 @@ define void @buildvec_vid_step2_add0_v4i8(<4 x i8>* %z0, <4 x i8>* %z1, <4 x i8>* %z2, <4 x i8>* %z3) { ; CHECK-LABEL: 
buildvec_vid_step2_add0_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vadd.vv v8, v8, v8 ; CHECK-NEXT: vse8.v v8, (a0) @@ -84,7 +84,7 @@ define void @buildvec_vid_step2_add1_v4i8(<4 x i8>* %z0, <4 x i8>* %z1, <4 x i8>* %z2, <4 x i8>* %z3) { ; CHECK-LABEL: buildvec_vid_step2_add1_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vadd.vv v8, v8, v8 ; CHECK-NEXT: vadd.vi v8, v8, 1 @@ -106,7 +106,7 @@ define void @buildvec_vid_stepn1_add0_v4i8(<4 x i8>* %z0, <4 x i8>* %z1, <4 x i8>* %z2, <4 x i8>* %z3) { ; CHECK-LABEL: buildvec_vid_stepn1_add0_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vrsub.vi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) @@ -124,7 +124,7 @@ define void @buildvec_vid_stepn2_add0_v4i8(<4 x i8>* %z0, <4 x i8>* %z1, <4 x i8>* %z2, <4 x i8>* %z3) { ; CHECK-LABEL: buildvec_vid_stepn2_add0_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vadd.vv v8, v8, v8 ; CHECK-NEXT: vrsub.vi v8, v8, 0 @@ -143,7 +143,7 @@ define void @buildvec_vid_stepn2_add3_v4i8(<4 x i8>* %z0, <4 x i8>* %z1, <4 x i8>* %z2, <4 x i8>* %z3) { ; CHECK-LABEL: buildvec_vid_stepn2_add3_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vadd.vv v8, v8, v8 ; CHECK-NEXT: vrsub.vi v8, v8, 3 @@ -156,7 +156,7 @@ define void @buildvec_vid_stepn3_add3_v4i8(<4 x i8>* %z0, <4 x i8>* %z1, <4 x i8>* %z2, <4 x i8>* %z3) { ; CHECK-LABEL: buildvec_vid_stepn3_add3_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 3 ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: li a1, -3 @@ -170,7 +170,7 @@ define void @buildvec_vid_stepn3_addn3_v4i32(<4 x i32>* %z0, <4 x i32>* %z1, <4 x i32>* %z2, <4 x i32>* %z3) { ; CHECK-LABEL: buildvec_vid_stepn3_addn3_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, -3 ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: li a4, -3 @@ -192,20 +192,20 @@ ; RV32-LABEL: buildvec_vid_step1_add0_v4i64: ; RV32: # %bb.0: ; RV32-NEXT: li a0, 1 -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, a0 ; RV32-NEXT: vmv.v.i v8, 0 -; RV32-NEXT: vsetivli zero, 3, e32, m1, tu, mu +; RV32-NEXT: vsetivli zero, 3, e32, m1, tu, ma ; RV32-NEXT: vslideup.vi v8, v9, 2 ; RV32-NEXT: lui a0, %hi(.LCPI12_0) ; RV32-NEXT: addi a0, a0, %lo(.LCPI12_0) -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vle32.v v9, (a0) ; RV32-NEXT: ret ; ; RV64-LABEL: buildvec_vid_step1_add0_v4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vid.v v8 ; RV64-NEXT: vadd.vi v9, v8, 2 ; RV64-NEXT: ret @@ -216,20 +216,20 @@ ; RV32-LABEL: buildvec_vid_step2_add0_v4i64: ; RV32: # %bb.0: ; RV32-NEXT: li a0, 2 -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, a0 ; RV32-NEXT: vmv.v.i v8, 
0 -; RV32-NEXT: vsetivli zero, 3, e32, m1, tu, mu +; RV32-NEXT: vsetivli zero, 3, e32, m1, tu, ma ; RV32-NEXT: vslideup.vi v8, v9, 2 ; RV32-NEXT: lui a0, %hi(.LCPI13_0) ; RV32-NEXT: addi a0, a0, %lo(.LCPI13_0) -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vle32.v v9, (a0) ; RV32-NEXT: ret ; ; RV64-LABEL: buildvec_vid_step2_add0_v4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vid.v v8 ; RV64-NEXT: vadd.vv v8, v8, v8 ; RV64-NEXT: vadd.vi v9, v8, 4 @@ -242,7 +242,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a6, %hi(.LCPI14_0) ; RV32-NEXT: addi a6, a6, %lo(.LCPI14_0) -; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; RV32-NEXT: vle8.v v8, (a6) ; RV32-NEXT: lui a6, %hi(.LCPI14_1) ; RV32-NEXT: addi a6, a6, %lo(.LCPI14_1) @@ -251,14 +251,14 @@ ; RV32-NEXT: vse8.v v9, (a1) ; RV32-NEXT: lui a0, 1 ; RV32-NEXT: addi a0, a0, -2048 -; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; RV32-NEXT: vmv.v.x v8, a0 -; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; RV32-NEXT: vse8.v v8, (a2) ; RV32-NEXT: li a0, 2047 -; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; RV32-NEXT: vmv.v.x v8, a0 -; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; RV32-NEXT: lui a0, %hi(.LCPI14_2) ; RV32-NEXT: addi a0, a0, %lo(.LCPI14_2) ; RV32-NEXT: vle8.v v9, (a0) @@ -272,7 +272,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a6, %hi(.LCPI14_0) ; RV64-NEXT: addi a6, a6, %lo(.LCPI14_0) -; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; RV64-NEXT: vle8.v v8, (a6) ; RV64-NEXT: lui a6, %hi(.LCPI14_1) ; RV64-NEXT: addi a6, a6, %lo(.LCPI14_1) @@ -281,14 +281,14 @@ ; RV64-NEXT: vse8.v v9, (a1) ; RV64-NEXT: lui a0, 1 ; RV64-NEXT: addiw a0, a0, -2048 -; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; RV64-NEXT: vmv.v.x v8, a0 -; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; RV64-NEXT: vse8.v v8, (a2) ; RV64-NEXT: li a0, 2047 -; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; RV64-NEXT: vmv.v.x v8, a0 -; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; RV64-NEXT: lui a0, %hi(.LCPI14_2) ; RV64-NEXT: addi a0, a0, %lo(.LCPI14_2) ; RV64-NEXT: vle8.v v9, (a0) @@ -309,12 +309,12 @@ define void @buildvec_dominant0_v8i16(<8 x i16>* %x) { ; CHECK-LABEL: buildvec_dominant0_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v8, zero ; CHECK-NEXT: vmv.v.i v9, 8 -; CHECK-NEXT: vsetivli zero, 4, e16, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m1, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 3 -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vse16.v v9, (a0) ; CHECK-NEXT: ret store <8 x i16> , <8 x i16>* %x @@ -324,7 +324,7 @@ define void @buildvec_dominant1_v8i16(<8 x i16>* %x) { ; CHECK-LABEL: buildvec_dominant1_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 8 ; CHECK-NEXT: vse16.v v8, (a0) ; 
CHECK-NEXT: ret @@ -343,7 +343,7 @@ define void @buildvec_dominant1_v2i8(<2 x i8>* %x) { ; CHECK-LABEL: buildvec_dominant1_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v8, -1 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret @@ -354,7 +354,7 @@ define void @buildvec_dominant2_v2i8(<2 x i8>* %x) { ; CHECK-LABEL: buildvec_dominant2_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vrsub.vi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) @@ -368,7 +368,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a1, %hi(.LCPI20_0) ; RV32-NEXT: addi a1, a1, %lo(.LCPI20_0) -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vle32.v v8, (a1) ; RV32-NEXT: vse32.v v8, (a0) ; RV32-NEXT: ret @@ -377,9 +377,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a1, %hi(.LCPI20_0) ; RV64-NEXT: ld a1, %lo(.LCPI20_0)(a1) -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vmv.v.i v8, -1 -; RV64-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; RV64-NEXT: vsetvli zero, zero, e64, m1, tu, ma ; RV64-NEXT: vmv.s.x v8, a1 ; RV64-NEXT: vse64.v v8, (a0) ; RV64-NEXT: ret @@ -392,7 +392,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a1, %hi(.LCPI21_0) ; RV32-NEXT: addi a1, a1, %lo(.LCPI21_0) -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vle32.v v8, (a1) ; RV32-NEXT: vse32.v v8, (a0) ; RV32-NEXT: ret @@ -401,7 +401,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a1, %hi(.LCPI21_0) ; RV64-NEXT: addi a1, a1, %lo(.LCPI21_0) -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vle64.v v8, (a1) ; RV64-NEXT: vse64.v v8, (a0) ; RV64-NEXT: ret @@ -413,9 +413,9 @@ ; CHECK-LABEL: buildvec_seq_v8i8_v4i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 513 -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v8, a1 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret store <8 x i8> , <8 x i8>* %x @@ -427,9 +427,9 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a1, 48 ; RV32-NEXT: addi a1, a1, 513 -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32-NEXT: vmv.v.x v8, a1 -; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV32-NEXT: vse8.v v8, (a0) ; RV32-NEXT: ret ; @@ -437,9 +437,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a1, 48 ; RV64-NEXT: addiw a1, a1, 513 -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV64-NEXT: vmv.v.x v8, a1 -; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64-NEXT: vse8.v v8, (a0) ; RV64-NEXT: ret store <8 x i8> , <8 x i8>* %x @@ -451,7 +451,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a1, %hi(.LCPI24_0) ; RV32-NEXT: addi a1, a1, %lo(.LCPI24_0) -; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV32-NEXT: vle8.v v8, (a1) ; RV32-NEXT: vse8.v v8, (a0) ; RV32-NEXT: ret @@ -460,9 +460,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a1, %hi(.LCPI24_0) ; RV64-NEXT: addi a1, a1, %lo(.LCPI24_0) -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, 
m1, ta, ma ; RV64-NEXT: vlse64.v v8, (a1), zero -; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV64-NEXT: vse8.v v8, (a0) ; RV64-NEXT: ret store <16 x i8> , <16 x i8>* %x @@ -474,9 +474,9 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a1, 528432 ; RV32-NEXT: addi a1, a1, 513 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vmv.v.x v8, a1 -; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV32-NEXT: vse8.v v8, (a0) ; RV32-NEXT: ret ; @@ -484,9 +484,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a1, 528432 ; RV64-NEXT: addiw a1, a1, 513 -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vmv.v.x v8, a1 -; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV64-NEXT: vse8.v v8, (a0) ; RV64-NEXT: ret store <16 x i8> , <16 x i8>* %x @@ -499,9 +499,9 @@ ; RV32-NEXT: li a1, 3 ; RV32-NEXT: sb a1, 8(a0) ; RV32-NEXT: li a1, 73 -; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; RV32-NEXT: vmv.s.x v0, a1 -; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV32-NEXT: vmv.v.i v9, 2 ; RV32-NEXT: li a1, 36 ; RV32-NEXT: vmv.s.x v8, a1 @@ -527,9 +527,9 @@ ; CHECK-LABEL: buildvec_seq_v4i16_v2i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, -127 -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v8, a1 -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret store <4 x i16> , <4 x i16>* %x @@ -539,7 +539,7 @@ define void @buildvec_vid_step1o2_v4i32(<4 x i32>* %z0, <4 x i32>* %z1, <4 x i32>* %z2, <4 x i32>* %z3, <4 x i32>* %z4, <4 x i32>* %z5, <4 x i32>* %z6) { ; RV32-LABEL: buildvec_vid_step1o2_v4i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vid.v v8 ; RV32-NEXT: vsrl.vi v8, v8, 1 ; RV32-NEXT: vse32.v v8, (a0) @@ -549,21 +549,21 @@ ; RV32-NEXT: vse32.v v8, (a3) ; RV32-NEXT: vse32.v v8, (a4) ; RV32-NEXT: vmv.s.x v8, zero -; RV32-NEXT: vsetivli zero, 2, e32, m1, tu, mu +; RV32-NEXT: vsetivli zero, 2, e32, m1, tu, ma ; RV32-NEXT: vslideup.vi v9, v8, 1 -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vse32.v v9, (a5) ; RV32-NEXT: li a0, 1 ; RV32-NEXT: vmv.s.x v8, a0 ; RV32-NEXT: vmv.v.i v9, 0 -; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu +; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, ma ; RV32-NEXT: vslideup.vi v9, v8, 3 ; RV32-NEXT: vse32.v v9, (a6) ; RV32-NEXT: ret ; ; RV64-LABEL: buildvec_vid_step1o2_v4i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64-NEXT: vid.v v8 ; RV64-NEXT: vsrl.vi v8, v8, 1 ; RV64-NEXT: vse32.v v8, (a0) @@ -573,14 +573,14 @@ ; RV64-NEXT: vse32.v v8, (a3) ; RV64-NEXT: vse32.v v8, (a4) ; RV64-NEXT: vmv.s.x v8, zero -; RV64-NEXT: vsetivli zero, 2, e32, m1, tu, mu +; RV64-NEXT: vsetivli zero, 2, e32, m1, tu, ma ; RV64-NEXT: vslideup.vi v9, v8, 1 -; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64-NEXT: vse32.v v9, (a5) ; RV64-NEXT: li a0, 1 ; RV64-NEXT: vmv.s.x v8, a0 ; RV64-NEXT: vmv.v.i v9, 0 -; RV64-NEXT: vsetvli zero, zero, e32, m1, 
tu, mu +; RV64-NEXT: vsetvli zero, zero, e32, m1, tu, ma ; RV64-NEXT: vslideup.vi v9, v8, 3 ; RV64-NEXT: vse32.v v9, (a6) ; RV64-NEXT: ret @@ -599,7 +599,7 @@ define void @buildvec_vid_step1o2_add3_v4i16(<4 x i16>* %z0, <4 x i16>* %z1, <4 x i16>* %z2, <4 x i16>* %z3, <4 x i16>* %z4, <4 x i16>* %z5, <4 x i16>* %z6) { ; CHECK-LABEL: buildvec_vid_step1o2_add3_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vsrl.vi v8, v8, 1 ; CHECK-NEXT: vadd.vi v8, v8, 3 @@ -612,13 +612,13 @@ ; CHECK-NEXT: li a0, 3 ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v10, 4 -; CHECK-NEXT: vsetivli zero, 2, e16, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v10, v8, 1 -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vse16.v v10, (a5) ; CHECK-NEXT: li a0, 4 ; CHECK-NEXT: vmv.s.x v8, a0 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 3 ; CHECK-NEXT: vse16.v v9, (a6) ; CHECK-NEXT: ret @@ -637,7 +637,7 @@ define void @buildvec_vid_stepn1o4_addn5_v8i8(<8 x i8>* %z0) { ; CHECK-LABEL: buildvec_vid_stepn1o4_addn5_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vsrl.vi v8, v8, 2 ; CHECK-NEXT: vrsub.vi v8, v8, -5 @@ -650,7 +650,7 @@ define void @buildvec_vid_mpy_imm_v8i16(<8 x i16>* %x) { ; CHECK-LABEL: buildvec_vid_mpy_imm_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: li a1, 17 ; CHECK-NEXT: vmul.vx v8, v8, a1 @@ -663,7 +663,7 @@ define void @buildvec_vid_shl_imm_v8i16(<8 x i16>* %x) { ; CHECK-LABEL: buildvec_vid_shl_imm_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: vse16.v v8, (a0) @@ -675,7 +675,7 @@ define <4 x i32> @splat_c3_v4i32(<4 x i32> %v) { ; CHECK-LABEL: splat_c3_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vrgather.vi v9, v8, 3 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -688,7 +688,7 @@ define <4 x i32> @splat_idx_v4i32(<4 x i32> %v, i64 %idx) { ; CHECK-LABEL: splat_idx_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vrgather.vx v9, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -701,7 +701,7 @@ define <8 x i16> @splat_c4_v8i16(<8 x i16> %v) { ; CHECK-LABEL: splat_c4_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vrgather.vi v9, v8, 4 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -714,7 +714,7 @@ define <8 x i16> @splat_idx_v8i16(<8 x i16> %v, i64 %idx) { ; CHECK-LABEL: splat_idx_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vrgather.vx v9, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -729,7 +729,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI37_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI37_0) -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; 
CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: ret ret <4 x i8> @@ -740,7 +740,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI38_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI38_0) -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: ret ret <4 x i8> @@ -752,10 +752,10 @@ ; CHECK-LABEL: buildvec_not_vid_v16i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 3 -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vmv.v.i v8, 0 -; CHECK-NEXT: vsetivli zero, 7, e8, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 7, e8, m1, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 6 ; CHECK-NEXT: ret ret <16 x i8> diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll @@ -9,7 +9,7 @@ define void @sext_v4i8_v4i32(<4 x i8>* %x, <4 x i32>* %z) { ; CHECK-LABEL: sext_v4i8_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsext.vf4 v9, v8 ; CHECK-NEXT: vse32.v v9, (a1) @@ -23,7 +23,7 @@ define void @zext_v4i8_v4i32(<4 x i8>* %x, <4 x i32>* %z) { ; CHECK-LABEL: zext_v4i8_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vzext.vf4 v9, v8 ; CHECK-NEXT: vse32.v v9, (a1) @@ -37,7 +37,7 @@ define void @sext_v8i8_v8i32(<8 x i8>* %x, <8 x i32>* %z) { ; LMULMAX8-LABEL: sext_v8i8_v8i32: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX8-NEXT: vle8.v v8, (a0) ; LMULMAX8-NEXT: vsext.vf4 v10, v8 ; LMULMAX8-NEXT: vse32.v v10, (a1) @@ -45,7 +45,7 @@ ; ; LMULMAX2-LABEL: sext_v8i8_v8i32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vsext.vf4 v10, v8 ; LMULMAX2-NEXT: vse32.v v10, (a1) @@ -53,11 +53,11 @@ ; ; LMULMAX1-LABEL: sext_v8i8_v8i32: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; LMULMAX1-NEXT: vle8.v v8, (a0) -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v9, v8, 4 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf4 v10, v9 ; LMULMAX1-NEXT: vsext.vf4 v9, v8 ; LMULMAX1-NEXT: addi a0, a1, 16 @@ -74,7 +74,7 @@ ; LMULMAX8-LABEL: sext_v32i8_v32i32: ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: li a2, 32 -; LMULMAX8-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; LMULMAX8-NEXT: vle8.v v8, (a0) ; LMULMAX8-NEXT: vsext.vf4 v16, v8 ; LMULMAX8-NEXT: vse32.v v16, (a1) @@ -83,17 +83,17 @@ ; LMULMAX2-LABEL: sext_v32i8_v32i32: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: li a2, 32 -; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; LMULMAX2-NEXT: vle8.v v8, (a0) -; LMULMAX2-NEXT: vsetivli zero, 8, e8, m1, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e8, m1, ta, ma ; 
LMULMAX2-NEXT: vslidedown.vi v10, v8, 8 -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vsext.vf4 v12, v10 -; LMULMAX2-NEXT: vsetivli zero, 16, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 16, e8, m2, ta, ma ; LMULMAX2-NEXT: vslidedown.vi v10, v8, 16 -; LMULMAX2-NEXT: vsetivli zero, 8, e8, m1, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e8, m1, ta, ma ; LMULMAX2-NEXT: vslidedown.vi v14, v10, 8 -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vsext.vf4 v16, v14 ; LMULMAX2-NEXT: vsext.vf4 v14, v8 ; LMULMAX2-NEXT: vsext.vf4 v8, v10 @@ -108,29 +108,29 @@ ; ; LMULMAX1-LABEL: sext_v32i8_v32i32: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: addi a2, a0, 16 ; LMULMAX1-NEXT: vle8.v v8, (a2) ; LMULMAX1-NEXT: vle8.v v9, (a0) -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v10, v8, 4 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf4 v11, v10 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v10, v8, 8 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v12, v10, 4 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf4 v13, v12 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v12, v9, 4 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf4 v14, v12 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v12, v9, 8 -; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; LMULMAX1-NEXT: vslidedown.vi v15, v12, 4 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vsext.vf4 v16, v15 ; LMULMAX1-NEXT: vsext.vf4 v15, v10 ; LMULMAX1-NEXT: vsext.vf4 v10, v12 @@ -161,10 +161,10 @@ define void @trunc_v4i8_v4i32(<4 x i32>* %x, <4 x i8>* %z) { ; CHECK-LABEL: trunc_v4i8_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vnsrl.wi v8, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a1) ; CHECK-NEXT: ret @@ -177,38 +177,38 @@ define void @trunc_v8i8_v8i32(<8 x i32>* %x, <8 x i8>* %z) { ; LMULMAX8-LABEL: trunc_v8i8_v8i32: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX8-NEXT: vle32.v v8, (a0) ; LMULMAX8-NEXT: vnsrl.wi v10, v8, 0 -; LMULMAX8-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; LMULMAX8-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; LMULMAX8-NEXT: vnsrl.wi v8, v10, 0 ; LMULMAX8-NEXT: vse8.v v8, (a1) ; LMULMAX8-NEXT: ret ; ; LMULMAX2-LABEL: trunc_v8i8_v8i32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli 
zero, 8, e16, m1, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX2-NEXT: vle32.v v8, (a0) ; LMULMAX2-NEXT: vnsrl.wi v10, v8, 0 -; LMULMAX2-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; LMULMAX2-NEXT: vnsrl.wi v8, v10, 0 ; LMULMAX2-NEXT: vse8.v v8, (a1) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: trunc_v8i8_v8i32: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; LMULMAX1-NEXT: vle32.v v8, (a0) ; LMULMAX1-NEXT: addi a0, a0, 16 ; LMULMAX1-NEXT: vle32.v v9, (a0) ; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0 -; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, ma ; LMULMAX1-NEXT: vslideup.vi v8, v9, 4 ; LMULMAX1-NEXT: vse8.v v8, (a1) ; LMULMAX1-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll @@ -9,7 +9,7 @@ define <4 x i8> @interleave_v2i8(<2 x i8> %x, <2 x i8> %y) { ; CHECK-LABEL: interleave_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf8, ta, ma ; CHECK-NEXT: vwaddu.vv v10, v8, v9 ; CHECK-NEXT: li a0, -1 ; CHECK-NEXT: vwmaccu.vx v10, a0, v9 @@ -22,7 +22,7 @@ define <4 x i16> @interleave_v2i16(<2 x i16> %x, <2 x i16> %y) { ; CHECK-LABEL: interleave_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf4, ta, ma ; CHECK-NEXT: vwaddu.vv v10, v8, v9 ; CHECK-NEXT: li a0, -1 ; CHECK-NEXT: vwmaccu.vx v10, a0, v9 @@ -36,7 +36,7 @@ define <4 x i32> @interleave_v2i32(<2 x i32> %x, <2 x i32> %y) { ; CHECK-LABEL: interleave_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, mf2, ta, ma ; CHECK-NEXT: vwaddu.vv v10, v9, v8 ; CHECK-NEXT: li a0, -1 ; CHECK-NEXT: vwmaccu.vx v10, a0, v8 @@ -53,7 +53,7 @@ ; RV32-V128: # %bb.0: ; RV32-V128-NEXT: vmv1r.v v12, v9 ; RV32-V128-NEXT: # kill: def $v8 killed $v8 def $v8m2 -; RV32-V128-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV32-V128-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV32-V128-NEXT: vid.v v10 ; RV32-V128-NEXT: vsrl.vi v14, v10, 1 ; RV32-V128-NEXT: vsetvli zero, zero, e64, m2, ta, mu @@ -80,7 +80,7 @@ ; ; RV32-V512-LABEL: interleave_v2i64: ; RV32-V512: # %bb.0: -; RV32-V512-NEXT: vsetivli zero, 4, e16, mf4, ta, mu +; RV32-V512-NEXT: vsetivli zero, 4, e16, mf4, ta, ma ; RV32-V512-NEXT: vid.v v10 ; RV32-V512-NEXT: vsrl.vi v11, v10, 1 ; RV32-V512-NEXT: vsetvli zero, zero, e64, m1, ta, mu @@ -110,7 +110,7 @@ define <8 x i8> @interleave_v4i8(<4 x i8> %x, <4 x i8> %y) { ; V128-LABEL: interleave_v4i8: ; V128: # %bb.0: -; V128-NEXT: vsetivli zero, 8, e8, mf4, ta, mu +; V128-NEXT: vsetivli zero, 8, e8, mf4, ta, ma ; V128-NEXT: vwaddu.vv v10, v9, v8 ; V128-NEXT: li a0, -1 ; V128-NEXT: vwmaccu.vx v10, a0, v8 @@ -119,7 +119,7 @@ ; ; V512-LABEL: 
interleave_v4i8: ; V512: # %bb.0: -; V512-NEXT: vsetivli zero, 8, e8, mf8, ta, mu +; V512-NEXT: vsetivli zero, 8, e8, mf8, ta, ma ; V512-NEXT: vwaddu.vv v10, v9, v8 ; V512-NEXT: li a0, -1 ; V512-NEXT: vwmaccu.vx v10, a0, v8 @@ -133,7 +133,7 @@ define <8 x i16> @interleave_v4i16(<4 x i16> %x, <4 x i16> %y) { ; V128-LABEL: interleave_v4i16: ; V128: # %bb.0: -; V128-NEXT: vsetivli zero, 8, e16, mf2, ta, mu +; V128-NEXT: vsetivli zero, 8, e16, mf2, ta, ma ; V128-NEXT: vwaddu.vv v10, v8, v9 ; V128-NEXT: li a0, -1 ; V128-NEXT: vwmaccu.vx v10, a0, v9 @@ -142,7 +142,7 @@ ; ; V512-LABEL: interleave_v4i16: ; V512: # %bb.0: -; V512-NEXT: vsetivli zero, 8, e16, mf4, ta, mu +; V512-NEXT: vsetivli zero, 8, e16, mf4, ta, ma ; V512-NEXT: vwaddu.vv v10, v8, v9 ; V512-NEXT: li a0, -1 ; V512-NEXT: vwmaccu.vx v10, a0, v9 @@ -155,7 +155,7 @@ define <8 x i32> @interleave_v4i32(<4 x i32> %x, <4 x i32> %y) { ; V128-LABEL: interleave_v4i32: ; V128: # %bb.0: -; V128-NEXT: vsetivli zero, 8, e32, m1, ta, mu +; V128-NEXT: vsetivli zero, 8, e32, m1, ta, ma ; V128-NEXT: vwaddu.vv v10, v8, v9 ; V128-NEXT: li a0, -1 ; V128-NEXT: vwmaccu.vx v10, a0, v9 @@ -164,7 +164,7 @@ ; ; V512-LABEL: interleave_v4i32: ; V512: # %bb.0: -; V512-NEXT: vsetivli zero, 8, e32, mf2, ta, mu +; V512-NEXT: vsetivli zero, 8, e32, mf2, ta, ma ; V512-NEXT: vwaddu.vv v10, v8, v9 ; V512-NEXT: li a0, -1 ; V512-NEXT: vwmaccu.vx v10, a0, v9 @@ -177,7 +177,7 @@ define <16 x i8> @interleave_v8i8(<8 x i8> %x, <8 x i8> %y) { ; V128-LABEL: interleave_v8i8: ; V128: # %bb.0: -; V128-NEXT: vsetivli zero, 16, e8, mf2, ta, mu +; V128-NEXT: vsetivli zero, 16, e8, mf2, ta, ma ; V128-NEXT: vwaddu.vv v10, v8, v9 ; V128-NEXT: li a0, -1 ; V128-NEXT: vwmaccu.vx v10, a0, v9 @@ -186,7 +186,7 @@ ; ; V512-LABEL: interleave_v8i8: ; V512: # %bb.0: -; V512-NEXT: vsetivli zero, 16, e8, mf8, ta, mu +; V512-NEXT: vsetivli zero, 16, e8, mf8, ta, ma ; V512-NEXT: vwaddu.vv v10, v8, v9 ; V512-NEXT: li a0, -1 ; V512-NEXT: vwmaccu.vx v10, a0, v9 @@ -200,7 +200,7 @@ define <16 x i16> @interleave_v8i16(<8 x i16> %x, <8 x i16> %y) { ; V128-LABEL: interleave_v8i16: ; V128: # %bb.0: -; V128-NEXT: vsetivli zero, 16, e16, m1, ta, mu +; V128-NEXT: vsetivli zero, 16, e16, m1, ta, ma ; V128-NEXT: vwaddu.vv v10, v9, v8 ; V128-NEXT: li a0, -1 ; V128-NEXT: vwmaccu.vx v10, a0, v8 @@ -209,7 +209,7 @@ ; ; V512-LABEL: interleave_v8i16: ; V512: # %bb.0: -; V512-NEXT: vsetivli zero, 16, e16, mf4, ta, mu +; V512-NEXT: vsetivli zero, 16, e16, mf4, ta, ma ; V512-NEXT: vwaddu.vv v10, v9, v8 ; V512-NEXT: li a0, -1 ; V512-NEXT: vwmaccu.vx v10, a0, v8 @@ -222,7 +222,7 @@ define <16 x i32> @interleave_v8i32(<8 x i32> %x, <8 x i32> %y) { ; V128-LABEL: interleave_v8i32: ; V128: # %bb.0: -; V128-NEXT: vsetivli zero, 16, e32, m2, ta, mu +; V128-NEXT: vsetivli zero, 16, e32, m2, ta, ma ; V128-NEXT: vwaddu.vv v12, v8, v10 ; V128-NEXT: li a0, -1 ; V128-NEXT: vwmaccu.vx v12, a0, v10 @@ -231,7 +231,7 @@ ; ; V512-LABEL: interleave_v8i32: ; V512: # %bb.0: -; V512-NEXT: vsetivli zero, 16, e32, mf2, ta, mu +; V512-NEXT: vsetivli zero, 16, e32, mf2, ta, ma ; V512-NEXT: vwaddu.vv v10, v8, v9 ; V512-NEXT: li a0, -1 ; V512-NEXT: vwmaccu.vx v10, a0, v9 @@ -245,7 +245,7 @@ ; V128-LABEL: interleave_v16i8: ; V128: # %bb.0: ; V128-NEXT: li a0, 32 -; V128-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; V128-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; V128-NEXT: vwaddu.vv v10, v8, v9 ; V128-NEXT: li a0, -1 ; V128-NEXT: vwmaccu.vx v10, a0, v9 @@ -255,7 +255,7 @@ ; V512-LABEL: interleave_v16i8: ; V512: # %bb.0: ; V512-NEXT: li a0, 32 -; 
V512-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; V512-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; V512-NEXT: vwaddu.vv v10, v8, v9 ; V512-NEXT: li a0, -1 ; V512-NEXT: vwmaccu.vx v10, a0, v9 @@ -269,7 +269,7 @@ ; V128-LABEL: interleave_v16i16: ; V128: # %bb.0: ; V128-NEXT: li a0, 32 -; V128-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; V128-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; V128-NEXT: vwaddu.vv v12, v8, v10 ; V128-NEXT: li a0, -1 ; V128-NEXT: vwmaccu.vx v12, a0, v10 @@ -279,7 +279,7 @@ ; V512-LABEL: interleave_v16i16: ; V512: # %bb.0: ; V512-NEXT: li a0, 32 -; V512-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; V512-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; V512-NEXT: vwaddu.vv v10, v8, v9 ; V512-NEXT: li a0, -1 ; V512-NEXT: vwmaccu.vx v10, a0, v9 @@ -293,7 +293,7 @@ ; V128-LABEL: interleave_v16i32: ; V128: # %bb.0: ; V128-NEXT: li a0, 32 -; V128-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; V128-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; V128-NEXT: vwaddu.vv v16, v8, v12 ; V128-NEXT: li a0, -1 ; V128-NEXT: vwmaccu.vx v16, a0, v12 @@ -303,7 +303,7 @@ ; V512-LABEL: interleave_v16i32: ; V512: # %bb.0: ; V512-NEXT: li a0, 32 -; V512-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; V512-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; V512-NEXT: vwaddu.vv v10, v8, v9 ; V512-NEXT: li a0, -1 ; V512-NEXT: vwmaccu.vx v10, a0, v9 @@ -317,7 +317,7 @@ ; V128-LABEL: interleave_v32i8: ; V128: # %bb.0: ; V128-NEXT: li a0, 64 -; V128-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; V128-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; V128-NEXT: vwaddu.vv v12, v8, v10 ; V128-NEXT: li a0, -1 ; V128-NEXT: vwmaccu.vx v12, a0, v10 @@ -327,7 +327,7 @@ ; V512-LABEL: interleave_v32i8: ; V512: # %bb.0: ; V512-NEXT: li a0, 64 -; V512-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; V512-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; V512-NEXT: vwaddu.vv v10, v8, v9 ; V512-NEXT: li a0, -1 ; V512-NEXT: vwmaccu.vx v10, a0, v9 @@ -341,7 +341,7 @@ ; V128-LABEL: interleave_v32i16: ; V128: # %bb.0: ; V128-NEXT: li a0, 64 -; V128-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; V128-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; V128-NEXT: vwaddu.vv v16, v8, v12 ; V128-NEXT: li a0, -1 ; V128-NEXT: vwmaccu.vx v16, a0, v12 @@ -351,7 +351,7 @@ ; V512-LABEL: interleave_v32i16: ; V512: # %bb.0: ; V512-NEXT: li a0, 64 -; V512-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; V512-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; V512-NEXT: vwaddu.vv v10, v8, v9 ; V512-NEXT: li a0, -1 ; V512-NEXT: vwmaccu.vx v10, a0, v9 @@ -372,7 +372,7 @@ ; RV32-V128-NEXT: lui a0, %hi(.LCPI15_0) ; RV32-V128-NEXT: addi a0, a0, %lo(.LCPI15_0) ; RV32-V128-NEXT: li a1, 32 -; RV32-V128-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; RV32-V128-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; RV32-V128-NEXT: vle32.v v0, (a0) ; RV32-V128-NEXT: vmv8r.v v24, v8 ; RV32-V128-NEXT: addi a0, sp, 16 @@ -388,7 +388,7 @@ ; RV32-V128-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill ; RV32-V128-NEXT: lui a0, 699051 ; RV32-V128-NEXT: addi a0, a0, -1366 -; RV32-V128-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV32-V128-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV32-V128-NEXT: vmv.s.x v0, a0 ; RV32-V128-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; RV32-V128-NEXT: csrr a0, vlenb @@ -398,7 +398,7 @@ ; RV32-V128-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload ; RV32-V128-NEXT: vrgather.vv v8, v16, v24, v0.t ; RV32-V128-NEXT: vmv.v.v v24, v8 -; RV32-V128-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-V128-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV32-V128-NEXT: addi a0, sp, 16 ; RV32-V128-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload 
; RV32-V128-NEXT: vwaddu.vv v0, v8, v16 @@ -422,7 +422,7 @@ ; RV64-V128-NEXT: lui a0, %hi(.LCPI15_0) ; RV64-V128-NEXT: addi a0, a0, %lo(.LCPI15_0) ; RV64-V128-NEXT: li a1, 32 -; RV64-V128-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; RV64-V128-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; RV64-V128-NEXT: vle32.v v0, (a0) ; RV64-V128-NEXT: vmv8r.v v24, v8 ; RV64-V128-NEXT: addi a0, sp, 16 @@ -438,7 +438,7 @@ ; RV64-V128-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill ; RV64-V128-NEXT: lui a0, 699051 ; RV64-V128-NEXT: addiw a0, a0, -1366 -; RV64-V128-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV64-V128-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV64-V128-NEXT: vmv.s.x v0, a0 ; RV64-V128-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; RV64-V128-NEXT: csrr a0, vlenb @@ -448,7 +448,7 @@ ; RV64-V128-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload ; RV64-V128-NEXT: vrgather.vv v8, v16, v24, v0.t ; RV64-V128-NEXT: vmv.v.v v24, v8 -; RV64-V128-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-V128-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV64-V128-NEXT: addi a0, sp, 16 ; RV64-V128-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload ; RV64-V128-NEXT: vwaddu.vv v0, v8, v16 @@ -465,7 +465,7 @@ ; V512-LABEL: interleave_v32i32: ; V512: # %bb.0: ; V512-NEXT: li a0, 64 -; V512-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; V512-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; V512-NEXT: vwaddu.vv v12, v8, v10 ; V512-NEXT: li a0, -1 ; V512-NEXT: vwmaccu.vx v12, a0, v10 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll @@ -9,7 +9,7 @@ define void @seteq_vv_v16i8(<16 x i8>* %x, <16 x i8>* %y) { ; CHECK-LABEL: seteq_vv_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vmseq.vv v0, v8, v9 @@ -29,7 +29,7 @@ ; CHECK-LABEL: setne_vv_v32i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle8.v v10, (a1) ; CHECK-NEXT: vmsne.vv v0, v8, v10 @@ -49,7 +49,7 @@ ; CHECK-LABEL: setgt_vv_v64i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a3, 64 -; CHECK-NEXT: vsetvli zero, a3, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e8, m4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle8.v v12, (a1) ; CHECK-NEXT: vmslt.vv v16, v12, v8 @@ -66,7 +66,7 @@ ; CHECK-LABEL: setlt_vv_v128i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a3, 128 -; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle8.v v16, (a1) ; CHECK-NEXT: vmslt.vv v24, v8, v16 @@ -82,7 +82,7 @@ define void @setge_vv_v8i8(<8 x i8>* %x, <8 x i8>* %y, <8 x i1>* %z) { ; CHECK-LABEL: setge_vv_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vmsle.vv v8, v9, v8 @@ -98,7 +98,7 @@ define void @setle_vv_v16i8(<16 x i8>* %x, <16 x i8>* %y, <16 x i1>* %z) { ; CHECK-LABEL: setle_vv_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vmsle.vv v8, v8, v9 @@ -115,7 
+115,7 @@ ; CHECK-LABEL: setugt_vv_v32i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a3, 32 -; CHECK-NEXT: vsetvli zero, a3, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e8, m2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle8.v v10, (a1) ; CHECK-NEXT: vmsltu.vv v12, v10, v8 @@ -132,7 +132,7 @@ ; CHECK-LABEL: setult_vv_v64i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a3, 64 -; CHECK-NEXT: vsetvli zero, a3, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e8, m4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle8.v v12, (a1) ; CHECK-NEXT: vmsltu.vv v16, v8, v12 @@ -149,7 +149,7 @@ ; CHECK-LABEL: setuge_vv_v128i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a3, 128 -; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle8.v v16, (a1) ; CHECK-NEXT: vmsleu.vv v24, v16, v8 @@ -165,7 +165,7 @@ define void @setule_vv_v8i8(<8 x i8>* %x, <8 x i8>* %y, <8 x i1>* %z) { ; CHECK-LABEL: setule_vv_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vmsleu.vv v8, v8, v9 @@ -181,7 +181,7 @@ define void @seteq_vx_v16i8(<16 x i8>* %x, i8 %y, <16 x i1>* %z) { ; CHECK-LABEL: seteq_vx_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmseq.vx v8, v8, a1 ; CHECK-NEXT: vsm.v v8, (a2) @@ -198,7 +198,7 @@ ; CHECK-LABEL: setne_vx_v32i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a3, 32 -; CHECK-NEXT: vsetvli zero, a3, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e8, m2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsne.vx v10, v8, a1 ; CHECK-NEXT: vsm.v v10, (a2) @@ -215,7 +215,7 @@ ; CHECK-LABEL: setgt_vx_v64i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a3, 64 -; CHECK-NEXT: vsetvli zero, a3, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e8, m4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsgt.vx v12, v8, a1 ; CHECK-NEXT: vsm.v v12, (a2) @@ -232,7 +232,7 @@ ; CHECK-LABEL: setlt_vx_v128i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a3, 128 -; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmslt.vx v16, v8, a1 ; CHECK-NEXT: vsm.v v16, (a2) @@ -248,7 +248,7 @@ define void @setge_vx_v8i8(<8 x i8>* %x, i8 %y, <8 x i1>* %z) { ; CHECK-LABEL: setge_vx_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.v.x v9, a1 ; CHECK-NEXT: vmsle.vv v8, v9, v8 @@ -265,7 +265,7 @@ define void @setle_vx_v16i8(<16 x i8>* %x, i8 %y, <16 x i1>* %z) { ; CHECK-LABEL: setle_vx_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsle.vx v8, v8, a1 ; CHECK-NEXT: vsm.v v8, (a2) @@ -282,7 +282,7 @@ ; CHECK-LABEL: setugt_vx_v32i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a3, 32 -; CHECK-NEXT: vsetvli zero, a3, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e8, m2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsgtu.vx v10, v8, a1 ; CHECK-NEXT: vsm.v v10, (a2) @@ -299,7 +299,7 @@ ; CHECK-LABEL: setult_vx_v64i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a3, 64 -; CHECK-NEXT: vsetvli zero, a3, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e8, m4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsltu.vx 
v12, v8, a1 ; CHECK-NEXT: vsm.v v12, (a2) @@ -316,7 +316,7 @@ ; CHECK-LABEL: setuge_vx_v128i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a3, 128 -; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.v.x v16, a1 ; CHECK-NEXT: vmsleu.vv v24, v16, v8 @@ -333,7 +333,7 @@ define void @setule_vx_v8i8(<8 x i8>* %x, i8 %y, <8 x i1>* %z) { ; CHECK-LABEL: setule_vx_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsleu.vx v8, v8, a1 ; CHECK-NEXT: vsm.v v8, (a2) @@ -349,7 +349,7 @@ define void @seteq_xv_v16i8(<16 x i8>* %x, i8 %y, <16 x i1>* %z) { ; CHECK-LABEL: seteq_xv_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmseq.vx v8, v8, a1 ; CHECK-NEXT: vsm.v v8, (a2) @@ -366,7 +366,7 @@ ; CHECK-LABEL: setne_xv_v32i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a3, 32 -; CHECK-NEXT: vsetvli zero, a3, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e8, m2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsne.vx v10, v8, a1 ; CHECK-NEXT: vsm.v v10, (a2) @@ -383,7 +383,7 @@ ; CHECK-LABEL: setgt_xv_v64i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a3, 64 -; CHECK-NEXT: vsetvli zero, a3, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e8, m4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmslt.vx v12, v8, a1 ; CHECK-NEXT: vsm.v v12, (a2) @@ -400,7 +400,7 @@ ; CHECK-LABEL: setlt_xv_v128i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a3, 128 -; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsgt.vx v16, v8, a1 ; CHECK-NEXT: vsm.v v16, (a2) @@ -416,7 +416,7 @@ define void @setge_xv_v8i8(<8 x i8>* %x, i8 %y, <8 x i1>* %z) { ; CHECK-LABEL: setge_xv_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsle.vx v8, v8, a1 ; CHECK-NEXT: vsm.v v8, (a2) @@ -432,7 +432,7 @@ define void @setle_xv_v16i8(<16 x i8>* %x, i8 %y, <16 x i1>* %z) { ; CHECK-LABEL: setle_xv_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.v.x v9, a1 ; CHECK-NEXT: vmsle.vv v8, v9, v8 @@ -450,7 +450,7 @@ ; CHECK-LABEL: setugt_xv_v32i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a3, 32 -; CHECK-NEXT: vsetvli zero, a3, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e8, m2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsltu.vx v10, v8, a1 ; CHECK-NEXT: vsm.v v10, (a2) @@ -467,7 +467,7 @@ ; CHECK-LABEL: setult_xv_v64i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a3, 64 -; CHECK-NEXT: vsetvli zero, a3, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e8, m4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsgtu.vx v12, v8, a1 ; CHECK-NEXT: vsm.v v12, (a2) @@ -484,7 +484,7 @@ ; CHECK-LABEL: setuge_xv_v128i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a3, 128 -; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsleu.vx v16, v8, a1 ; CHECK-NEXT: vsm.v v16, (a2) @@ -500,7 +500,7 @@ define void @setule_xv_v8i8(<8 x i8>* %x, i8 %y, <8 x i1>* %z) { ; CHECK-LABEL: setule_xv_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli 
zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.v.x v9, a1 ; CHECK-NEXT: vmsleu.vv v8, v9, v8 @@ -517,7 +517,7 @@ define void @seteq_vi_v16i8(<16 x i8>* %x, <16 x i1>* %z) { ; CHECK-LABEL: seteq_vi_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmseq.vi v8, v8, 0 ; CHECK-NEXT: vsm.v v8, (a1) @@ -534,7 +534,7 @@ ; CHECK-LABEL: setne_vi_v32i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsne.vi v10, v8, 0 ; CHECK-NEXT: vsm.v v10, (a1) @@ -551,7 +551,7 @@ ; CHECK-LABEL: setgt_vi_v64i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 64 -; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsgt.vi v12, v8, 0 ; CHECK-NEXT: vsm.v v12, (a1) @@ -568,7 +568,7 @@ ; CHECK-LABEL: setgt_vi_v64i8_nonzero: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 64 -; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsgt.vi v12, v8, 5 ; CHECK-NEXT: vsm.v v12, (a1) @@ -585,7 +585,7 @@ ; CHECK-LABEL: setlt_vi_v128i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 128 -; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmslt.vx v16, v8, zero ; CHECK-NEXT: vsm.v v16, (a1) @@ -601,7 +601,7 @@ define void @setge_vi_v8i8(<8 x i8>* %x, <8 x i1>* %z) { ; CHECK-LABEL: setge_vi_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsgt.vi v8, v8, -1 ; CHECK-NEXT: vsm.v v8, (a1) @@ -617,7 +617,7 @@ define void @setle_vi_v16i8(<16 x i8>* %x, <16 x i1>* %z) { ; CHECK-LABEL: setle_vi_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsle.vi v8, v8, 0 ; CHECK-NEXT: vsm.v v8, (a1) @@ -634,7 +634,7 @@ ; CHECK-LABEL: setugt_vi_v32i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsgtu.vi v10, v8, 5 ; CHECK-NEXT: vsm.v v10, (a1) @@ -651,7 +651,7 @@ ; CHECK-LABEL: setult_vi_v64i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 64 -; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsleu.vi v12, v8, 4 ; CHECK-NEXT: vsm.v v12, (a1) @@ -668,7 +668,7 @@ ; CHECK-LABEL: setuge_vi_v128i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 128 -; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsgtu.vi v16, v8, 4 ; CHECK-NEXT: vsm.v v16, (a1) @@ -684,7 +684,7 @@ define void @setule_vi_v8i8(<8 x i8>* %x, <8 x i1>* %z) { ; CHECK-LABEL: setule_vi_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmsleu.vi v8, v8, 5 ; CHECK-NEXT: vsm.v v8, (a1) @@ -700,7 +700,7 @@ define void @seteq_vv_v8i16(<8 x i16>* %x, <8 x i16>* %y) { ; CHECK-LABEL: seteq_vv_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, 
e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vmseq.vv v0, v8, v9 @@ -719,7 +719,7 @@ define void @setne_vv_v4i32(<4 x i32>* %x, <4 x i32>* %y) { ; CHECK-LABEL: setne_vv_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vmsne.vv v0, v8, v9 @@ -738,7 +738,7 @@ define void @setgt_vv_v2i64(<2 x i64>* %x, <2 x i64>* %y) { ; CHECK-LABEL: setgt_vv_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vmslt.vv v0, v9, v8 @@ -757,7 +757,7 @@ define void @setlt_vv_v16i16(<16 x i16>* %x, <16 x i16>* %y) { ; CHECK-LABEL: setlt_vv_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v10, (a1) ; CHECK-NEXT: vmslt.vv v0, v8, v10 @@ -776,7 +776,7 @@ define void @setugt_vv_v8i32(<8 x i32>* %x, <8 x i32>* %y) { ; CHECK-LABEL: setugt_vv_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v10, (a1) ; CHECK-NEXT: vmsltu.vv v0, v10, v8 @@ -795,7 +795,7 @@ define void @setult_vv_v4i64(<4 x i64>* %x, <4 x i64>* %y) { ; CHECK-LABEL: setult_vv_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v10, (a1) ; CHECK-NEXT: vmsltu.vv v0, v8, v10 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll @@ -6,9 +6,9 @@ ; CHECK-LABEL: shuffle_v4i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 11 -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.s.x v0, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %s = shufflevector <4 x i16> %x, <4 x i16> %y, <4 x i32> @@ -19,9 +19,9 @@ ; CHECK-LABEL: shuffle_v8i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 203 -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.s.x v0, a0 -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %s = shufflevector <8 x i32> %x, <8 x i32> %y, <8 x i32> @@ -32,9 +32,9 @@ ; CHECK-LABEL: shuffle_xv_v4i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 9 -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.s.x v0, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 5, v0 ; CHECK-NEXT: ret %s = shufflevector <4 x i16> , <4 x i16> %x, <4 x i32> @@ -45,9 +45,9 @@ ; CHECK-LABEL: shuffle_vx_v4i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 6 -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: 
vmv.s.x v0, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 5, v0 ; CHECK-NEXT: ret %s = shufflevector <4 x i16> %x, <4 x i16> , <4 x i32> @@ -59,7 +59,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI4_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI4_0) -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v10, (a0) ; CHECK-NEXT: vrgather.vv v9, v8, v10 ; CHECK-NEXT: vmv1r.v v8, v9 @@ -73,7 +73,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI5_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI5_0) -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v10, (a0) ; CHECK-NEXT: vrgather.vv v9, v8, v10 ; CHECK-NEXT: vmv1r.v v8, v9 @@ -103,7 +103,7 @@ ; CHECK-LABEL: vrgather_shuffle_xv_v4i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 12 -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu ; CHECK-NEXT: vid.v v9 @@ -137,7 +137,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, %hi(.LCPI9_0) ; RV32-NEXT: addi a0, a0, %lo(.LCPI9_0) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vle16.v v16, (a0) ; RV32-NEXT: vrgatherei16.vv v12, v8, v16 ; RV32-NEXT: vmv.v.v v8, v12 @@ -147,7 +147,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, %hi(.LCPI9_0) ; RV64-NEXT: addi a0, a0, %lo(.LCPI9_0) -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vle64.v v16, (a0) ; RV64-NEXT: vrgather.vv v12, v8, v16 ; RV64-NEXT: vmv.v.v v8, v12 @@ -161,7 +161,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, %hi(.LCPI10_0) ; RV32-NEXT: addi a0, a0, %lo(.LCPI10_0) -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vle16.v v16, (a0) ; RV32-NEXT: vrgatherei16.vv v12, v8, v16 ; RV32-NEXT: vmv.v.v v8, v12 @@ -171,7 +171,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, %hi(.LCPI10_0) ; RV64-NEXT: addi a0, a0, %lo(.LCPI10_0) -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vle64.v v16, (a0) ; RV64-NEXT: vrgather.vv v12, v8, v16 ; RV64-NEXT: vmv.v.v v8, v12 @@ -184,10 +184,10 @@ ; RV32-LABEL: vrgather_shuffle_vv_v8i64: ; RV32: # %bb.0: ; RV32-NEXT: li a0, 5 -; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV32-NEXT: vmv.s.x v16, a0 ; RV32-NEXT: vmv.v.i v20, 2 -; RV32-NEXT: vsetvli zero, zero, e16, m1, tu, mu +; RV32-NEXT: vsetvli zero, zero, e16, m1, tu, ma ; RV32-NEXT: vslideup.vi v20, v16, 7 ; RV32-NEXT: lui a0, %hi(.LCPI11_0) ; RV32-NEXT: addi a0, a0, %lo(.LCPI11_0) @@ -203,10 +203,10 @@ ; RV64-LABEL: vrgather_shuffle_vv_v8i64: ; RV64: # %bb.0: ; RV64-NEXT: li a0, 5 -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vmv.s.x v16, a0 ; RV64-NEXT: vmv.v.i v20, 2 -; RV64-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; RV64-NEXT: vsetvli zero, zero, e64, m4, tu, ma ; RV64-NEXT: vslideup.vi v20, v16, 7 ; RV64-NEXT: lui a0, %hi(.LCPI11_0) ; RV64-NEXT: addi a0, a0, %lo(.LCPI11_0) @@ -243,7 +243,7 @@ ; RV64-LABEL: vrgather_shuffle_xv_v8i64: ; RV64: # %bb.0: ; RV64-NEXT: li a0, 113 -; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; RV64-NEXT: vmv.s.x v0, a0 
; RV64-NEXT: lui a0, %hi(.LCPI12_0) ; RV64-NEXT: addi a0, a0, %lo(.LCPI12_0) @@ -278,7 +278,7 @@ ; RV64-LABEL: vrgather_shuffle_vx_v8i64: ; RV64: # %bb.0: ; RV64-NEXT: li a0, 115 -; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; RV64-NEXT: vmv.s.x v0, a0 ; RV64-NEXT: lui a0, %hi(.LCPI13_0) ; RV64-NEXT: addi a0, a0, %lo(.LCPI13_0) @@ -295,10 +295,10 @@ define <4 x i8> @interleave_shuffles(<4 x i8> %x) { ; CHECK-LABEL: interleave_shuffles: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vrgather.vi v9, v8, 0 ; CHECK-NEXT: vrgather.vi v10, v8, 1 -; CHECK-NEXT: vsetivli zero, 4, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf8, ta, ma ; CHECK-NEXT: vwaddu.vv v8, v9, v10 ; CHECK-NEXT: li a0, -1 ; CHECK-NEXT: vwmaccu.vx v8, a0, v10 @@ -312,7 +312,7 @@ define <8 x i8> @splat_ve4(<8 x i8> %v) { ; CHECK-LABEL: splat_ve4: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vrgather.vi v9, v8, 4 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -323,12 +323,12 @@ define <8 x i8> @splat_ve4_ins_i0ve2(<8 x i8> %v) { ; CHECK-LABEL: splat_ve4_ins_i0ve2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v10, 4 ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, ma ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; CHECK-NEXT: vrgather.vv v9, v8, v10 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -340,12 +340,12 @@ ; CHECK-LABEL: splat_ve4_ins_i1ve3: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 3 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: vmv.v.i v10, 4 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v10, v9, 1 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vrgather.vv v9, v8, v10 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -357,7 +357,7 @@ ; CHECK-LABEL: splat_ve2_we0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 66 -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vrgather.vi v10, v8, 2 @@ -371,10 +371,10 @@ define <8 x i8> @splat_ve2_we0_ins_i0ve4(<8 x i8> %v, <8 x i8> %w) { ; CHECK-LABEL: splat_ve2_we0_ins_i0ve4: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v11, 2 ; CHECK-NEXT: li a0, 4 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, ma ; CHECK-NEXT: vmv.s.x v11, a0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; CHECK-NEXT: li a0, 66 @@ -391,11 +391,11 @@ ; CHECK-LABEL: splat_ve2_we0_ins_i0we4: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 67 -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.s.x v0, a0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vrgather.vi v10, v8, 2 -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, 
mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 4 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vrgather.vv v10, v9, v8, v0.t @@ -410,7 +410,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 8256 ; RV32-NEXT: addi a0, a0, 514 -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32-NEXT: vmv.v.x v11, a0 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: li a0, 66 @@ -424,7 +424,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 8256 ; RV64-NEXT: addiw a0, a0, 514 -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV64-NEXT: vmv.v.x v11, a0 ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: li a0, 66 @@ -441,10 +441,10 @@ ; CHECK-LABEL: splat_ve2_we0_ins_i2we4: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 4 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: vmv.v.i v11, 0 -; CHECK-NEXT: vsetivli zero, 3, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 3, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v11, v10, 2 ; CHECK-NEXT: li a0, 70 ; CHECK-NEXT: vmv.s.x v0, a0 @@ -461,14 +461,14 @@ ; RV32-LABEL: splat_ve2_we0_ins_i2ve4_i5we6: ; RV32: # %bb.0: ; RV32-NEXT: li a0, 6 -; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV32-NEXT: vmv.s.x v10, a0 ; RV32-NEXT: vmv.v.i v11, 0 -; RV32-NEXT: vsetivli zero, 6, e8, mf2, tu, mu +; RV32-NEXT: vsetivli zero, 6, e8, mf2, tu, ma ; RV32-NEXT: vslideup.vi v11, v10, 5 ; RV32-NEXT: lui a0, 8256 ; RV32-NEXT: addi a0, a0, 2 -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32-NEXT: vmv.v.x v12, a0 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV32-NEXT: li a0, 98 @@ -481,14 +481,14 @@ ; RV64-LABEL: splat_ve2_we0_ins_i2ve4_i5we6: ; RV64: # %bb.0: ; RV64-NEXT: li a0, 6 -; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64-NEXT: vmv.s.x v10, a0 ; RV64-NEXT: vmv.v.i v11, 0 -; RV64-NEXT: vsetivli zero, 6, e8, mf2, tu, mu +; RV64-NEXT: vsetivli zero, 6, e8, mf2, tu, ma ; RV64-NEXT: vslideup.vi v11, v10, 5 ; RV64-NEXT: lui a0, 8256 ; RV64-NEXT: addiw a0, a0, 2 -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV64-NEXT: vmv.v.x v12, a0 ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; RV64-NEXT: li a0, 98 @@ -504,11 +504,11 @@ define <8 x i8> @widen_splat_ve3(<4 x i8> %v) { ; CHECK-LABEL: widen_splat_ve3: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vrgather.vi v8, v9, 3 ; CHECK-NEXT: ret %shuf = shufflevector <4 x i8> %v, <4 x i8> poison, <8 x i32> @@ -518,7 +518,7 @@ define <4 x i16> @slidedown_v4i16(<4 x i16> %x) { ; CHECK-LABEL: slidedown_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 1 ; CHECK-NEXT: ret %s = shufflevector <4 x i16> %x, <4 x i16> poison, <4 x i32> @@ -528,7 +528,7 @@ define <8 x i32> @slidedown_v8i32(<8 x i32> %x) { ; CHECK-LABEL: slidedown_v8i32: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 3 ; CHECK-NEXT: ret %s = shufflevector <8 x i32> %x, <8 x i32> poison, <8 x i32> @@ -538,7 +538,7 @@ define <4 x i16> @slideup_v4i16(<4 x i16> %x) { ; CHECK-LABEL: slideup_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 1 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -549,7 +549,7 @@ define <8 x i32> @slideup_v8i32(<8 x i32> %x) { ; CHECK-LABEL: slideup_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; CHECK-NEXT: vslideup.vi v10, v8, 3 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -560,9 +560,9 @@ define <8 x i16> @splice_unary(<8 x i16> %x) { ; CHECK-LABEL: splice_unary: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v9, v8, 2 -; CHECK-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 6 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -573,9 +573,9 @@ define <8 x i32> @splice_unary2(<8 x i32> %x) { ; CHECK-LABEL: splice_unary2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 3, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 3, e32, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v10, v8, 5 -; CHECK-NEXT: vsetivli zero, 8, e32, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; CHECK-NEXT: vslideup.vi v10, v8, 3 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -586,9 +586,9 @@ define <8 x i16> @splice_binary(<8 x i16> %x, <8 x i16> %y) { ; CHECK-LABEL: splice_binary: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 2 -; CHECK-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 6 ; CHECK-NEXT: ret %s = shufflevector <8 x i16> %x, <8 x i16> %y, <8 x i32> @@ -598,9 +598,9 @@ define <8 x i32> @splice_binary2(<8 x i32> %x, <8 x i32> %y) { ; CHECK-LABEL: splice_binary2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 3, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 3, e32, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 5 -; CHECK-NEXT: vsetivli zero, 8, e32, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %s = shufflevector <8 x i32> %x, <8 x i32> %y, <8 x i32> diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll @@ -9,7 +9,7 @@ define void @splat_v16i8(<16 x i8>* %x, i8 %y) { ; CHECK-LABEL: splat_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret @@ -22,7 +22,7 @@ define void @splat_v8i16(<8 x i16>* %x, i16 %y) { ; CHECK-LABEL: splat_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.x v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret @@ -35,7 +35,7 @@ define void @splat_v4i32(<4 x i32>* %x, i32 %y) { ; CHECK-LABEL: splat_v4i32: ; 
CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.x v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret @@ -53,7 +53,7 @@ ; LMULMAX8-RV32-NEXT: sw a2, 12(sp) ; LMULMAX8-RV32-NEXT: sw a1, 8(sp) ; LMULMAX8-RV32-NEXT: addi a1, sp, 8 -; LMULMAX8-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX8-RV32-NEXT: vlse64.v v8, (a1), zero ; LMULMAX8-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX8-RV32-NEXT: addi sp, sp, 16 @@ -66,7 +66,7 @@ ; LMULMAX2-RV32-NEXT: sw a2, 12(sp) ; LMULMAX2-RV32-NEXT: sw a1, 8(sp) ; LMULMAX2-RV32-NEXT: addi a1, sp, 8 -; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX2-RV32-NEXT: vlse64.v v8, (a1), zero ; LMULMAX2-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX2-RV32-NEXT: addi sp, sp, 16 @@ -79,7 +79,7 @@ ; LMULMAX1-RV32-NEXT: sw a2, 12(sp) ; LMULMAX1-RV32-NEXT: sw a1, 8(sp) ; LMULMAX1-RV32-NEXT: addi a1, sp, 8 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vlse64.v v8, (a1), zero ; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi sp, sp, 16 @@ -87,21 +87,21 @@ ; ; LMULMAX8-RV64-LABEL: splat_v2i64: ; LMULMAX8-RV64: # %bb.0: -; LMULMAX8-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX8-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX8-RV64-NEXT: vmv.v.x v8, a1 ; LMULMAX8-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX8-RV64-NEXT: ret ; ; LMULMAX2-RV64-LABEL: splat_v2i64: ; LMULMAX2-RV64: # %bb.0: -; LMULMAX2-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX2-RV64-NEXT: vmv.v.x v8, a1 ; LMULMAX2-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX2-RV64-NEXT: ret ; ; LMULMAX1-RV64-LABEL: splat_v2i64: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: vmv.v.x v8, a1 ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV64-NEXT: ret @@ -115,7 +115,7 @@ ; LMULMAX8-LABEL: splat_v32i8: ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: li a2, 32 -; LMULMAX8-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; LMULMAX8-NEXT: vmv.v.x v8, a1 ; LMULMAX8-NEXT: vse8.v v8, (a0) ; LMULMAX8-NEXT: ret @@ -123,14 +123,14 @@ ; LMULMAX2-LABEL: splat_v32i8: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: li a2, 32 -; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; LMULMAX2-NEXT: vmv.v.x v8, a1 ; LMULMAX2-NEXT: vse8.v v8, (a0) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: splat_v32i8: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: vmv.v.x v8, a1 ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vse8.v v8, (a1) @@ -145,21 +145,21 @@ define void @splat_v16i16(<16 x i16>* %x, i16 %y) { ; LMULMAX8-LABEL: splat_v16i16: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX8-NEXT: vmv.v.x v8, a1 ; LMULMAX8-NEXT: vse16.v v8, (a0) ; LMULMAX8-NEXT: ret ; ; LMULMAX2-LABEL: splat_v16i16: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-NEXT: vmv.v.x v8, a1 ; LMULMAX2-NEXT: vse16.v v8, (a0) ; LMULMAX2-NEXT: ret ; ; 
LMULMAX1-LABEL: splat_v16i16: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-NEXT: vmv.v.x v8, a1 ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vse16.v v8, (a1) @@ -174,21 +174,21 @@ define void @splat_v8i32(<8 x i32>* %x, i32 %y) { ; LMULMAX8-LABEL: splat_v8i32: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX8-NEXT: vmv.v.x v8, a1 ; LMULMAX8-NEXT: vse32.v v8, (a0) ; LMULMAX8-NEXT: ret ; ; LMULMAX2-LABEL: splat_v8i32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vmv.v.x v8, a1 ; LMULMAX2-NEXT: vse32.v v8, (a0) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: splat_v8i32: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vmv.v.x v8, a1 ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vse32.v v8, (a1) @@ -208,7 +208,7 @@ ; LMULMAX8-RV32-NEXT: sw a2, 12(sp) ; LMULMAX8-RV32-NEXT: sw a1, 8(sp) ; LMULMAX8-RV32-NEXT: addi a1, sp, 8 -; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX8-RV32-NEXT: vlse64.v v8, (a1), zero ; LMULMAX8-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX8-RV32-NEXT: addi sp, sp, 16 @@ -221,7 +221,7 @@ ; LMULMAX2-RV32-NEXT: sw a2, 12(sp) ; LMULMAX2-RV32-NEXT: sw a1, 8(sp) ; LMULMAX2-RV32-NEXT: addi a1, sp, 8 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vlse64.v v8, (a1), zero ; LMULMAX2-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX2-RV32-NEXT: addi sp, sp, 16 @@ -230,9 +230,9 @@ ; LMULMAX1-RV32-LABEL: splat_v4i64: ; LMULMAX1-RV32: # %bb.0: ; LMULMAX1-RV32-NEXT: li a3, 5 -; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; LMULMAX1-RV32-NEXT: vmv.s.x v0, a3 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v8, a2 ; LMULMAX1-RV32-NEXT: vmerge.vxm v8, v8, a1, v0 ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 @@ -242,21 +242,21 @@ ; ; LMULMAX8-RV64-LABEL: splat_v4i64: ; LMULMAX8-RV64: # %bb.0: -; LMULMAX8-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX8-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX8-RV64-NEXT: vmv.v.x v8, a1 ; LMULMAX8-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX8-RV64-NEXT: ret ; ; LMULMAX2-RV64-LABEL: splat_v4i64: ; LMULMAX2-RV64: # %bb.0: -; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV64-NEXT: vmv.v.x v8, a1 ; LMULMAX2-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX2-RV64-NEXT: ret ; ; LMULMAX1-RV64-LABEL: splat_v4i64: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: vmv.v.x v8, a1 ; LMULMAX1-RV64-NEXT: addi a1, a0, 16 ; LMULMAX1-RV64-NEXT: vse64.v v8, (a1) @@ -271,7 +271,7 @@ define void @splat_zero_v16i8(<16 x i8>* %x) { ; CHECK-LABEL: splat_zero_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret @@ -284,7 +284,7 @@ define void @splat_zero_v8i16(<8 x i16>* %x) { ; CHECK-LABEL: 
splat_zero_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret @@ -297,7 +297,7 @@ define void @splat_zero_v4i32(<4 x i32>* %x) { ; CHECK-LABEL: splat_zero_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret @@ -310,7 +310,7 @@ define void @splat_zero_v2i64(<2 x i64>* %x) { ; CHECK-LABEL: splat_zero_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vse64.v v8, (a0) ; CHECK-NEXT: ret @@ -324,7 +324,7 @@ ; LMULMAX8-LABEL: splat_zero_v32i8: ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: li a1, 32 -; LMULMAX8-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; LMULMAX8-NEXT: vmv.v.i v8, 0 ; LMULMAX8-NEXT: vse8.v v8, (a0) ; LMULMAX8-NEXT: ret @@ -332,14 +332,14 @@ ; LMULMAX2-LABEL: splat_zero_v32i8: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: li a1, 32 -; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; LMULMAX2-NEXT: vmv.v.i v8, 0 ; LMULMAX2-NEXT: vse8.v v8, (a0) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: splat_zero_v32i8: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: vmv.v.i v8, 0 ; LMULMAX1-NEXT: vse8.v v8, (a0) ; LMULMAX1-NEXT: addi a0, a0, 16 @@ -354,21 +354,21 @@ define void @splat_zero_v16i16(<16 x i16>* %x) { ; LMULMAX8-LABEL: splat_zero_v16i16: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX8-NEXT: vmv.v.i v8, 0 ; LMULMAX8-NEXT: vse16.v v8, (a0) ; LMULMAX8-NEXT: ret ; ; LMULMAX2-LABEL: splat_zero_v16i16: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-NEXT: vmv.v.i v8, 0 ; LMULMAX2-NEXT: vse16.v v8, (a0) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: splat_zero_v16i16: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-NEXT: vmv.v.i v8, 0 ; LMULMAX1-NEXT: vse16.v v8, (a0) ; LMULMAX1-NEXT: addi a0, a0, 16 @@ -383,21 +383,21 @@ define void @splat_zero_v8i32(<8 x i32>* %x) { ; LMULMAX8-LABEL: splat_zero_v8i32: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX8-NEXT: vmv.v.i v8, 0 ; LMULMAX8-NEXT: vse32.v v8, (a0) ; LMULMAX8-NEXT: ret ; ; LMULMAX2-LABEL: splat_zero_v8i32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vmv.v.i v8, 0 ; LMULMAX2-NEXT: vse32.v v8, (a0) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: splat_zero_v8i32: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vmv.v.i v8, 0 ; LMULMAX1-NEXT: vse32.v v8, (a0) ; LMULMAX1-NEXT: addi a0, a0, 16 @@ -412,21 +412,21 @@ define void @splat_zero_v4i64(<4 x i64>* %x) { ; LMULMAX8-LABEL: splat_zero_v4i64: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX8-NEXT: 
vmv.v.i v8, 0 ; LMULMAX8-NEXT: vse64.v v8, (a0) ; LMULMAX8-NEXT: ret ; ; LMULMAX2-LABEL: splat_zero_v4i64: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-NEXT: vmv.v.i v8, 0 ; LMULMAX2-NEXT: vse64.v v8, (a0) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: splat_zero_v4i64: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.i v8, 0 ; LMULMAX1-RV32-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a0, a0, 16 @@ -435,7 +435,7 @@ ; ; LMULMAX1-RV64-LABEL: splat_zero_v4i64: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: vmv.v.i v8, 0 ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a0, a0, 16 @@ -450,7 +450,7 @@ define void @splat_allones_v16i8(<16 x i8>* %x) { ; CHECK-LABEL: splat_allones_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, -1 ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret @@ -463,7 +463,7 @@ define void @splat_allones_v8i16(<8 x i16>* %x) { ; CHECK-LABEL: splat_allones_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, -1 ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret @@ -476,7 +476,7 @@ define void @splat_allones_v4i32(<4 x i32>* %x) { ; CHECK-LABEL: splat_allones_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, -1 ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret @@ -489,7 +489,7 @@ define void @splat_allones_v2i64(<2 x i64>* %x) { ; CHECK-LABEL: splat_allones_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, -1 ; CHECK-NEXT: vse64.v v8, (a0) ; CHECK-NEXT: ret @@ -503,7 +503,7 @@ ; LMULMAX8-LABEL: splat_allones_v32i8: ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: li a1, 32 -; LMULMAX8-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; LMULMAX8-NEXT: vmv.v.i v8, -1 ; LMULMAX8-NEXT: vse8.v v8, (a0) ; LMULMAX8-NEXT: ret @@ -511,14 +511,14 @@ ; LMULMAX2-LABEL: splat_allones_v32i8: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: li a1, 32 -; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; LMULMAX2-NEXT: vmv.v.i v8, -1 ; LMULMAX2-NEXT: vse8.v v8, (a0) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: splat_allones_v32i8: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: vmv.v.i v8, -1 ; LMULMAX1-NEXT: vse8.v v8, (a0) ; LMULMAX1-NEXT: addi a0, a0, 16 @@ -533,21 +533,21 @@ define void @splat_allones_v16i16(<16 x i16>* %x) { ; LMULMAX8-LABEL: splat_allones_v16i16: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX8-NEXT: vmv.v.i v8, -1 ; LMULMAX8-NEXT: vse16.v v8, (a0) ; LMULMAX8-NEXT: ret ; ; LMULMAX2-LABEL: splat_allones_v16i16: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-NEXT: vmv.v.i v8, -1 ; LMULMAX2-NEXT: vse16.v v8, (a0) ; 
LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: splat_allones_v16i16: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-NEXT: vmv.v.i v8, -1 ; LMULMAX1-NEXT: vse16.v v8, (a0) ; LMULMAX1-NEXT: addi a0, a0, 16 @@ -562,21 +562,21 @@ define void @splat_allones_v8i32(<8 x i32>* %x) { ; LMULMAX8-LABEL: splat_allones_v8i32: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX8-NEXT: vmv.v.i v8, -1 ; LMULMAX8-NEXT: vse32.v v8, (a0) ; LMULMAX8-NEXT: ret ; ; LMULMAX2-LABEL: splat_allones_v8i32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vmv.v.i v8, -1 ; LMULMAX2-NEXT: vse32.v v8, (a0) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-LABEL: splat_allones_v8i32: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vmv.v.i v8, -1 ; LMULMAX1-NEXT: vse32.v v8, (a0) ; LMULMAX1-NEXT: addi a0, a0, 16 @@ -591,21 +591,21 @@ define void @splat_allones_v4i64(<4 x i64>* %x) { ; LMULMAX8-LABEL: splat_allones_v4i64: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX8-NEXT: vmv.v.i v8, -1 ; LMULMAX8-NEXT: vse64.v v8, (a0) ; LMULMAX8-NEXT: ret ; ; LMULMAX2-LABEL: splat_allones_v4i64: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-NEXT: vmv.v.i v8, -1 ; LMULMAX2-NEXT: vse64.v v8, (a0) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: splat_allones_v4i64: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.i v8, -1 ; LMULMAX1-RV32-NEXT: vse32.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a0, a0, 16 @@ -614,7 +614,7 @@ ; ; LMULMAX1-RV64-LABEL: splat_allones_v4i64: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: vmv.v.i v8, -1 ; LMULMAX1-RV64-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a0, a0, 16 @@ -633,7 +633,7 @@ define void @splat_allones_with_use_v4i64(<4 x i64>* %x) { ; LMULMAX8-LABEL: splat_allones_with_use_v4i64: ; LMULMAX8: # %bb.0: -; LMULMAX8-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX8-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX8-NEXT: vle64.v v8, (a0) ; LMULMAX8-NEXT: vadd.vi v8, v8, -1 ; LMULMAX8-NEXT: vse64.v v8, (a0) @@ -641,7 +641,7 @@ ; ; LMULMAX2-LABEL: splat_allones_with_use_v4i64: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-NEXT: vle64.v v8, (a0) ; LMULMAX2-NEXT: vadd.vi v8, v8, -1 ; LMULMAX2-NEXT: vse64.v v8, (a0) @@ -649,13 +649,13 @@ ; ; LMULMAX1-RV32-LABEL: splat_allones_with_use_v4i64: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 ; LMULMAX1-RV32-NEXT: vle64.v v9, (a1) -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.i v10, -1 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli 
zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vadd.vv v9, v9, v10 ; LMULMAX1-RV32-NEXT: vadd.vv v8, v8, v10 ; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) @@ -664,7 +664,7 @@ ; ; LMULMAX1-RV64-LABEL: splat_allones_with_use_v4i64: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: addi a1, a0, 16 ; LMULMAX1-RV64-NEXT: vle64.v v8, (a1) ; LMULMAX1-RV64-NEXT: vle64.v v9, (a0) @@ -688,7 +688,7 @@ ; LMULMAX8-RV32: # %bb.0: ; LMULMAX8-RV32-NEXT: addi sp, sp, -16 ; LMULMAX8-RV32-NEXT: .cfi_def_cfa_offset 16 -; LMULMAX8-RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; LMULMAX8-RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; LMULMAX8-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX8-RV32-NEXT: sw a2, 12(sp) ; LMULMAX8-RV32-NEXT: sw a1, 8(sp) @@ -702,7 +702,7 @@ ; LMULMAX2-RV32-LABEL: vadd_vx_v16i64: ; LMULMAX2-RV32: # %bb.0: ; LMULMAX2-RV32-NEXT: addi a4, a0, 64 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vle64.v v8, (a4) ; LMULMAX2-RV32-NEXT: addi a4, a0, 96 ; LMULMAX2-RV32-NEXT: vle64.v v10, (a4) @@ -711,10 +711,10 @@ ; LMULMAX2-RV32-NEXT: vle64.v v14, (a0) ; LMULMAX2-RV32-NEXT: li a0, 85 ; LMULMAX2-RV32-NEXT: vmv.s.x v0, a0 -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.x v16, a2 ; LMULMAX2-RV32-NEXT: vmerge.vxm v16, v16, a1, v0 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vadd.vv v14, v14, v16 ; LMULMAX2-RV32-NEXT: vadd.vv v12, v12, v16 ; LMULMAX2-RV32-NEXT: vadd.vv v10, v10, v16 @@ -731,7 +731,7 @@ ; LMULMAX1-RV32-LABEL: vadd_vx_v16i64: ; LMULMAX1-RV32: # %bb.0: ; LMULMAX1-RV32-NEXT: addi a4, a0, 96 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle64.v v8, (a4) ; LMULMAX1-RV32-NEXT: addi a4, a0, 112 ; LMULMAX1-RV32-NEXT: vle64.v v9, (a4) @@ -748,10 +748,10 @@ ; LMULMAX1-RV32-NEXT: vle64.v v15, (a0) ; LMULMAX1-RV32-NEXT: li a0, 5 ; LMULMAX1-RV32-NEXT: vmv.s.x v0, a0 -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v16, a2 ; LMULMAX1-RV32-NEXT: vmerge.vxm v16, v16, a1, v0 -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vadd.vv v15, v15, v16 ; LMULMAX1-RV32-NEXT: vadd.vv v14, v14, v16 ; LMULMAX1-RV32-NEXT: vadd.vv v13, v13, v16 @@ -779,7 +779,7 @@ ; ; LMULMAX8-RV64-LABEL: vadd_vx_v16i64: ; LMULMAX8-RV64: # %bb.0: -; LMULMAX8-RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; LMULMAX8-RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; LMULMAX8-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX8-RV64-NEXT: vadd.vx v8, v8, a1 ; LMULMAX8-RV64-NEXT: vse64.v v8, (a2) @@ -787,7 +787,7 @@ ; ; LMULMAX2-RV64-LABEL: vadd_vx_v16i64: ; LMULMAX2-RV64: # %bb.0: -; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV64-NEXT: addi a3, a0, 96 ; LMULMAX2-RV64-NEXT: vle64.v v8, (a3) ; LMULMAX2-RV64-NEXT: addi a3, a0, 32 @@ -810,7 +810,7 @@ ; ; LMULMAX1-RV64-LABEL: vadd_vx_v16i64: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; 
LMULMAX1-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a3, a0, 96 ; LMULMAX1-RV64-NEXT: vle64.v v9, (a3) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll @@ -8,7 +8,7 @@ ; CHECK-LABEL: gather_const_v16i8: ; CHECK: # %bb.0: ; CHECK-NEXT: addi a1, a0, 12 -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vlse8.v v8, (a1), zero ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret @@ -24,7 +24,7 @@ ; CHECK-LABEL: gather_const_v8i16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi a1, a0, 10 -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vlse16.v v8, (a1), zero ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret @@ -40,7 +40,7 @@ ; CHECK-LABEL: gather_const_v4i32: ; CHECK: # %bb.0: ; CHECK-NEXT: addi a1, a0, 12 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vlse32.v v8, (a1), zero ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret @@ -56,7 +56,7 @@ ; CHECK-LABEL: gather_const_v2i64: ; CHECK: # %bb.0: ; CHECK-NEXT: addi a1, a0, 8 -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v8, (a1), zero ; CHECK-NEXT: vse64.v v8, (a0) ; CHECK-NEXT: ret @@ -73,7 +73,7 @@ ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: li a1, 64 ; LMULMAX4-NEXT: addi a2, a0, 32 -; LMULMAX4-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; LMULMAX4-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; LMULMAX4-NEXT: vlse8.v v8, (a2), zero ; LMULMAX4-NEXT: vse8.v v8, (a0) ; LMULMAX4-NEXT: ret @@ -81,7 +81,7 @@ ; LMULMAX1-LABEL: gather_const_v64i8: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: addi a1, a0, 32 -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: vlse8.v v8, (a1), zero ; LMULMAX1-NEXT: addi a2, a0, 16 ; LMULMAX1-NEXT: addi a3, a0, 48 @@ -103,7 +103,7 @@ ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: li a1, 32 ; LMULMAX4-NEXT: addi a2, a0, 50 -; LMULMAX4-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; LMULMAX4-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; LMULMAX4-NEXT: vlse16.v v8, (a2), zero ; LMULMAX4-NEXT: vse16.v v8, (a0) ; LMULMAX4-NEXT: ret @@ -111,7 +111,7 @@ ; LMULMAX1-LABEL: gather_const_v16i16: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: addi a1, a0, 50 -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-NEXT: vlse16.v v8, (a1), zero ; LMULMAX1-NEXT: addi a1, a0, 48 ; LMULMAX1-NEXT: addi a2, a0, 16 @@ -133,7 +133,7 @@ ; LMULMAX4-LABEL: gather_const_v16i32: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: addi a1, a0, 36 -; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; LMULMAX4-NEXT: vlse32.v v8, (a1), zero ; LMULMAX4-NEXT: vse32.v v8, (a0) ; LMULMAX4-NEXT: ret @@ -141,7 +141,7 @@ ; LMULMAX1-LABEL: gather_const_v16i32: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: addi a1, a0, 36 -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vlse32.v v8, (a1), zero ; LMULMAX1-NEXT: addi a1, a0, 32 ; LMULMAX1-NEXT: addi a2, a0, 16 @@ -163,7 +163,7 @@ ; LMULMAX4-LABEL: gather_const_v8i64: ; LMULMAX4: # %bb.0: ; LMULMAX4-NEXT: addi a1, a0, 24 -; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, mu 
+; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; LMULMAX4-NEXT: vlse64.v v8, (a1), zero ; LMULMAX4-NEXT: vse64.v v8, (a0) ; LMULMAX4-NEXT: ret @@ -171,7 +171,7 @@ ; LMULMAX1-LABEL: gather_const_v8i64: ; LMULMAX1: # %bb.0: ; LMULMAX1-NEXT: addi a1, a0, 24 -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vlse64.v v8, (a1), zero ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: addi a2, a0, 48 @@ -193,7 +193,7 @@ ; CHECK-LABEL: splat_concat_low: ; CHECK: # %bb.0: ; CHECK-NEXT: addi a0, a0, 2 -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vlse16.v v8, (a0), zero ; CHECK-NEXT: vse16.v v8, (a2) ; CHECK-NEXT: ret @@ -209,7 +209,7 @@ ; CHECK-LABEL: splat_concat_high: ; CHECK: # %bb.0: ; CHECK-NEXT: addi a0, a1, 2 -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vlse16.v v8, (a0), zero ; CHECK-NEXT: vse16.v v8, (a2) ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll @@ -9,7 +9,7 @@ define void @add_v16i8(<16 x i8>* %x, <16 x i8>* %y) { ; CHECK-LABEL: add_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vadd.vv v8, v8, v9 @@ -25,7 +25,7 @@ define void @add_v8i16(<8 x i16>* %x, <8 x i16>* %y) { ; CHECK-LABEL: add_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vadd.vv v8, v8, v9 @@ -41,7 +41,7 @@ define void @add_v4i32(<4 x i32>* %x, <4 x i32>* %y) { ; CHECK-LABEL: add_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vadd.vv v8, v8, v9 @@ -57,7 +57,7 @@ define void @add_v2i64(<2 x i64>* %x, <2 x i64>* %y) { ; CHECK-LABEL: add_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vadd.vv v8, v8, v9 @@ -73,7 +73,7 @@ define void @sub_v16i8(<16 x i8>* %x, <16 x i8>* %y) { ; CHECK-LABEL: sub_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vsub.vv v8, v8, v9 @@ -89,7 +89,7 @@ define void @sub_v8i16(<8 x i16>* %x, <8 x i16>* %y) { ; CHECK-LABEL: sub_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vsub.vv v8, v8, v9 @@ -105,7 +105,7 @@ define void @sub_v4i32(<4 x i32>* %x, <4 x i32>* %y) { ; CHECK-LABEL: sub_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vsub.vv v8, v8, v9 @@ -121,7 +121,7 @@ define void @sub_v2i64(<2 x i64>* %x, <2 x i64>* %y) { ; CHECK-LABEL: sub_v2i64: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vsub.vv v8, v8, v9 @@ -137,7 +137,7 @@ define void @mul_v16i8(<16 x i8>* %x, <16 x i8>* %y) { ; CHECK-LABEL: mul_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vmul.vv v8, v8, v9 @@ -153,7 +153,7 @@ define void @mul_v8i16(<8 x i16>* %x, <8 x i16>* %y) { ; CHECK-LABEL: mul_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vmul.vv v8, v8, v9 @@ -169,7 +169,7 @@ define void @mul_v4i32(<4 x i32>* %x, <4 x i32>* %y) { ; CHECK-LABEL: mul_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vmul.vv v8, v8, v9 @@ -185,7 +185,7 @@ define void @mul_v2i64(<2 x i64>* %x, <2 x i64>* %y) { ; CHECK-LABEL: mul_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vmul.vv v8, v8, v9 @@ -201,7 +201,7 @@ define void @and_v16i8(<16 x i8>* %x, <16 x i8>* %y) { ; CHECK-LABEL: and_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vand.vv v8, v8, v9 @@ -217,7 +217,7 @@ define void @and_v8i16(<8 x i16>* %x, <8 x i16>* %y) { ; CHECK-LABEL: and_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vand.vv v8, v8, v9 @@ -233,7 +233,7 @@ define void @and_v4i32(<4 x i32>* %x, <4 x i32>* %y) { ; CHECK-LABEL: and_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vand.vv v8, v8, v9 @@ -249,7 +249,7 @@ define void @and_v2i64(<2 x i64>* %x, <2 x i64>* %y) { ; CHECK-LABEL: and_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vand.vv v8, v8, v9 @@ -265,7 +265,7 @@ define void @or_v16i8(<16 x i8>* %x, <16 x i8>* %y) { ; CHECK-LABEL: or_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vor.vv v8, v8, v9 @@ -281,7 +281,7 @@ define void @or_v8i16(<8 x i16>* %x, <8 x i16>* %y) { ; CHECK-LABEL: or_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vor.vv v8, v8, v9 @@ -297,7 +297,7 @@ define void @or_v4i32(<4 x i32>* %x, <4 x i32>* %y) { ; CHECK-LABEL: or_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; 
CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vor.vv v8, v8, v9 @@ -313,7 +313,7 @@ define void @or_v2i64(<2 x i64>* %x, <2 x i64>* %y) { ; CHECK-LABEL: or_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vor.vv v8, v8, v9 @@ -329,7 +329,7 @@ define void @xor_v16i8(<16 x i8>* %x, <16 x i8>* %y) { ; CHECK-LABEL: xor_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vxor.vv v8, v8, v9 @@ -345,7 +345,7 @@ define void @xor_v8i16(<8 x i16>* %x, <8 x i16>* %y) { ; CHECK-LABEL: xor_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vxor.vv v8, v8, v9 @@ -361,7 +361,7 @@ define void @xor_v4i32(<4 x i32>* %x, <4 x i32>* %y) { ; CHECK-LABEL: xor_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vxor.vv v8, v8, v9 @@ -377,7 +377,7 @@ define void @xor_v2i64(<2 x i64>* %x, <2 x i64>* %y) { ; CHECK-LABEL: xor_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vxor.vv v8, v8, v9 @@ -393,7 +393,7 @@ define void @lshr_v16i8(<16 x i8>* %x, <16 x i8>* %y) { ; CHECK-LABEL: lshr_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vsrl.vv v8, v8, v9 @@ -409,7 +409,7 @@ define void @lshr_v8i16(<8 x i16>* %x, <8 x i16>* %y) { ; CHECK-LABEL: lshr_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vsrl.vv v8, v8, v9 @@ -425,7 +425,7 @@ define void @lshr_v4i32(<4 x i32>* %x, <4 x i32>* %y) { ; CHECK-LABEL: lshr_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vsrl.vv v8, v8, v9 @@ -441,7 +441,7 @@ define void @lshr_v2i64(<2 x i64>* %x, <2 x i64>* %y) { ; CHECK-LABEL: lshr_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vsrl.vv v8, v8, v9 @@ -457,7 +457,7 @@ define void @ashr_v16i8(<16 x i8>* %x, <16 x i8>* %y) { ; CHECK-LABEL: ashr_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vsra.vv v8, v8, v9 @@ -473,7 +473,7 @@ define void @ashr_v8i16(<8 x i16>* %x, <8 x i16>* %y) { ; CHECK-LABEL: ashr_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vsra.vv v8, v8, v9 @@ -489,7 +489,7 @@ define void @ashr_v4i32(<4 x i32>* %x, 
<4 x i32>* %y) { ; CHECK-LABEL: ashr_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vsra.vv v8, v8, v9 @@ -505,7 +505,7 @@ define void @ashr_v2i64(<2 x i64>* %x, <2 x i64>* %y) { ; CHECK-LABEL: ashr_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vsra.vv v8, v8, v9 @@ -521,7 +521,7 @@ define void @shl_v16i8(<16 x i8>* %x, <16 x i8>* %y) { ; CHECK-LABEL: shl_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vsll.vv v8, v8, v9 @@ -537,7 +537,7 @@ define void @shl_v8i16(<8 x i16>* %x, <8 x i16>* %y) { ; CHECK-LABEL: shl_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vsll.vv v8, v8, v9 @@ -553,7 +553,7 @@ define void @shl_v4i32(<4 x i32>* %x, <4 x i32>* %y) { ; CHECK-LABEL: shl_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vsll.vv v8, v8, v9 @@ -569,7 +569,7 @@ define void @shl_v2i64(<2 x i64>* %x, <2 x i64>* %y) { ; CHECK-LABEL: shl_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vsll.vv v8, v8, v9 @@ -585,7 +585,7 @@ define void @sdiv_v16i8(<16 x i8>* %x, <16 x i8>* %y) { ; CHECK-LABEL: sdiv_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vdiv.vv v8, v8, v9 @@ -601,7 +601,7 @@ define void @sdiv_v8i16(<8 x i16>* %x, <8 x i16>* %y) { ; CHECK-LABEL: sdiv_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vdiv.vv v8, v8, v9 @@ -617,7 +617,7 @@ define void @sdiv_v4i32(<4 x i32>* %x, <4 x i32>* %y) { ; CHECK-LABEL: sdiv_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vdiv.vv v8, v8, v9 @@ -633,7 +633,7 @@ define void @sdiv_v2i64(<2 x i64>* %x, <2 x i64>* %y) { ; CHECK-LABEL: sdiv_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vdiv.vv v8, v8, v9 @@ -649,7 +649,7 @@ define void @srem_v16i8(<16 x i8>* %x, <16 x i8>* %y) { ; CHECK-LABEL: srem_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vrem.vv v8, v8, v9 @@ -665,7 +665,7 @@ define void @srem_v8i16(<8 x i16>* %x, <8 x i16>* %y) { ; CHECK-LABEL: srem_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; 
CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vrem.vv v8, v8, v9 @@ -681,7 +681,7 @@ define void @srem_v4i32(<4 x i32>* %x, <4 x i32>* %y) { ; CHECK-LABEL: srem_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vrem.vv v8, v8, v9 @@ -697,7 +697,7 @@ define void @srem_v2i64(<2 x i64>* %x, <2 x i64>* %y) { ; CHECK-LABEL: srem_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vrem.vv v8, v8, v9 @@ -713,7 +713,7 @@ define void @udiv_v16i8(<16 x i8>* %x, <16 x i8>* %y) { ; CHECK-LABEL: udiv_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vdivu.vv v8, v8, v9 @@ -729,7 +729,7 @@ define void @udiv_v8i16(<8 x i16>* %x, <8 x i16>* %y) { ; CHECK-LABEL: udiv_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vdivu.vv v8, v8, v9 @@ -745,7 +745,7 @@ define void @udiv_v4i32(<4 x i32>* %x, <4 x i32>* %y) { ; CHECK-LABEL: udiv_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vdivu.vv v8, v8, v9 @@ -761,7 +761,7 @@ define void @udiv_v2i64(<2 x i64>* %x, <2 x i64>* %y) { ; CHECK-LABEL: udiv_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vdivu.vv v8, v8, v9 @@ -777,7 +777,7 @@ define void @urem_v16i8(<16 x i8>* %x, <16 x i8>* %y) { ; CHECK-LABEL: urem_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vremu.vv v8, v8, v9 @@ -793,7 +793,7 @@ define void @urem_v8i16(<8 x i16>* %x, <8 x i16>* %y) { ; CHECK-LABEL: urem_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vremu.vv v8, v8, v9 @@ -809,7 +809,7 @@ define void @urem_v4i32(<4 x i32>* %x, <4 x i32>* %y) { ; CHECK-LABEL: urem_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vremu.vv v8, v8, v9 @@ -825,7 +825,7 @@ define void @urem_v2i64(<2 x i64>* %x, <2 x i64>* %y) { ; CHECK-LABEL: urem_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vremu.vv v8, v8, v9 @@ -841,38 +841,38 @@ define void @mulhu_v16i8(<16 x i8>* %x) { ; RV32-LABEL: mulhu_v16i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV32-NEXT: vle8.v v8, (a0) ; RV32-NEXT: li a1, 513 -; 
RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; RV32-NEXT: vmv.s.x v0, a1 -; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV32-NEXT: vmv.v.i v9, 4 ; RV32-NEXT: vmerge.vim v9, v9, 1, v0 ; RV32-NEXT: lui a1, 1 ; RV32-NEXT: addi a2, a1, 78 -; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; RV32-NEXT: vmv.s.x v0, a2 -; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV32-NEXT: vmerge.vim v9, v9, 3, v0 ; RV32-NEXT: lui a2, 8 ; RV32-NEXT: addi a2, a2, 304 -; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; RV32-NEXT: vmv.s.x v0, a2 -; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV32-NEXT: vmerge.vim v9, v9, 2, v0 ; RV32-NEXT: lui a2, 3 ; RV32-NEXT: addi a2, a2, -2044 -; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; RV32-NEXT: vmv.s.x v0, a2 -; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV32-NEXT: vmv.v.i v10, 0 ; RV32-NEXT: li a2, -128 ; RV32-NEXT: vmerge.vxm v11, v10, a2, v0 ; RV32-NEXT: addi a1, a1, 32 -; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; RV32-NEXT: vmv.s.x v0, a1 -; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV32-NEXT: lui a1, %hi(.LCPI52_0) ; RV32-NEXT: addi a1, a1, %lo(.LCPI52_0) ; RV32-NEXT: vle8.v v12, (a1) @@ -888,38 +888,38 @@ ; ; RV64-LABEL: mulhu_v16i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV64-NEXT: vle8.v v8, (a0) ; RV64-NEXT: li a1, 513 -; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; RV64-NEXT: vmv.s.x v0, a1 -; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV64-NEXT: vmv.v.i v9, 4 ; RV64-NEXT: vmerge.vim v9, v9, 1, v0 ; RV64-NEXT: lui a1, 1 ; RV64-NEXT: addiw a2, a1, 78 -; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; RV64-NEXT: vmv.s.x v0, a2 -; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV64-NEXT: vmerge.vim v9, v9, 3, v0 ; RV64-NEXT: lui a2, 8 ; RV64-NEXT: addiw a2, a2, 304 -; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; RV64-NEXT: vmv.s.x v0, a2 -; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV64-NEXT: vmerge.vim v9, v9, 2, v0 ; RV64-NEXT: lui a2, 3 ; RV64-NEXT: addiw a2, a2, -2044 -; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; RV64-NEXT: vmv.s.x v0, a2 -; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV64-NEXT: vmv.v.i v10, 0 ; RV64-NEXT: li a2, -128 ; RV64-NEXT: vmerge.vxm v11, v10, a2, v0 ; RV64-NEXT: addiw a1, a1, 32 -; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; RV64-NEXT: vmv.s.x v0, a1 -; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV64-NEXT: lui a1, %hi(.LCPI52_0) ; RV64-NEXT: addi a1, a1, %lo(.LCPI52_0) ; RV64-NEXT: vle8.v v12, (a1) @@ -941,7 +941,7 @@ define void @mulhu_v8i16(<8 x 
i16>* %x) { ; CHECK-LABEL: mulhu_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: li a1, 1 ; CHECK-NEXT: vmv.s.x v9, a1 @@ -949,17 +949,17 @@ ; CHECK-NEXT: vmv.s.x v0, a1 ; CHECK-NEXT: vmv.v.i v10, 3 ; CHECK-NEXT: vmerge.vim v10, v10, 2, v0 -; CHECK-NEXT: vsetivli zero, 7, e16, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 7, e16, m1, tu, ma ; CHECK-NEXT: vslideup.vi v10, v9, 6 -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.i v11, 0 ; CHECK-NEXT: lui a1, 1048568 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, ma ; CHECK-NEXT: vmv.v.i v12, 0 ; CHECK-NEXT: vmv.s.x v12, a1 -; CHECK-NEXT: vsetivli zero, 7, e16, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 7, e16, m1, tu, ma ; CHECK-NEXT: vslideup.vi v11, v9, 6 -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: lui a1, %hi(.LCPI53_0) ; CHECK-NEXT: addi a1, a1, %lo(.LCPI53_0) ; CHECK-NEXT: vle16.v v9, (a1) @@ -980,16 +980,16 @@ define void @mulhu_v4i32(<4 x i32>* %x) { ; CHECK-LABEL: mulhu_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: lui a1, 524288 ; CHECK-NEXT: vmv.s.x v9, a1 ; CHECK-NEXT: vmv.v.i v10, 0 -; CHECK-NEXT: vsetivli zero, 3, e32, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 3, e32, m1, tu, ma ; CHECK-NEXT: vslideup.vi v10, v9, 2 ; CHECK-NEXT: lui a1, %hi(.LCPI54_0) ; CHECK-NEXT: addi a1, a1, %lo(.LCPI54_0) -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vmulhu.vv v9, v8, v9 ; CHECK-NEXT: vsub.vv v8, v8, v9 @@ -998,9 +998,9 @@ ; CHECK-NEXT: li a1, 1 ; CHECK-NEXT: vmv.s.x v9, a1 ; CHECK-NEXT: vmv.v.i v10, 2 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, ma ; CHECK-NEXT: vslideup.vi v10, v9, 3 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v10 ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret @@ -1013,35 +1013,35 @@ define void @mulhu_v2i64(<2 x i64>* %x) { ; RV32-LABEL: mulhu_v2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: lui a1, %hi(.LCPI55_0) ; RV32-NEXT: addi a1, a1, %lo(.LCPI55_0) -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vle32.v v9, (a1) -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vmulhu.vv v8, v8, v9 ; RV32-NEXT: lui a1, %hi(.LCPI55_1) ; RV32-NEXT: addi a1, a1, %lo(.LCPI55_1) -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vle32.v v9, (a1) -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vsrl.vv v8, v8, v9 ; RV32-NEXT: vse64.v v8, (a0) ; RV32-NEXT: ret ; ; RV64-LABEL: mulhu_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: lui a1, %hi(.LCPI55_0) ; RV64-NEXT: addi a1, a1, %lo(.LCPI55_0) ; RV64-NEXT: vlse64.v v8, (a1), zero ; RV64-NEXT: lui a1, 
%hi(.LCPI55_1) ; RV64-NEXT: ld a1, %lo(.LCPI55_1)(a1) ; RV64-NEXT: vle64.v v9, (a0) -; RV64-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; RV64-NEXT: vsetvli zero, zero, e64, m1, tu, ma ; RV64-NEXT: vmv.s.x v8, a1 -; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV64-NEXT: vmulhu.vv v8, v9, v8 ; RV64-NEXT: vid.v v9 ; RV64-NEXT: vadd.vi v9, v9, 1 @@ -1057,13 +1057,13 @@ define void @mulhs_v16i8(<16 x i8>* %x) { ; RV32-LABEL: mulhs_v16i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV32-NEXT: vle8.v v8, (a0) ; RV32-NEXT: lui a1, 5 ; RV32-NEXT: addi a1, a1, -1452 -; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; RV32-NEXT: vmv.s.x v0, a1 -; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV32-NEXT: vmv.v.i v9, 7 ; RV32-NEXT: vmerge.vim v9, v9, 1, v0 ; RV32-NEXT: li a1, -123 @@ -1077,13 +1077,13 @@ ; ; RV64-LABEL: mulhs_v16i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV64-NEXT: vle8.v v8, (a0) ; RV64-NEXT: lui a1, 5 ; RV64-NEXT: addiw a1, a1, -1452 -; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; RV64-NEXT: vmv.s.x v0, a1 -; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV64-NEXT: vmv.v.i v9, 7 ; RV64-NEXT: vmerge.vim v9, v9, 1, v0 ; RV64-NEXT: li a1, -123 @@ -1103,7 +1103,7 @@ define void @mulhs_v8i16(<8 x i16>* %x) { ; RV32-LABEL: mulhs_v8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV32-NEXT: vle16.v v8, (a0) ; RV32-NEXT: li a1, 105 ; RV32-NEXT: vmv.s.x v0, a1 @@ -1122,7 +1122,7 @@ ; ; RV64-LABEL: mulhs_v8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64-NEXT: vle16.v v8, (a0) ; RV64-NEXT: li a1, 105 ; RV64-NEXT: vmv.s.x v0, a1 @@ -1147,7 +1147,7 @@ define void @mulhs_v4i32(<4 x i32>* %x) { ; RV32-LABEL: mulhs_v4i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vle32.v v8, (a0) ; RV32-NEXT: li a1, 5 ; RV32-NEXT: vmv.s.x v0, a1 @@ -1166,13 +1166,13 @@ ; ; RV64-LABEL: mulhs_v4i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64-NEXT: vle32.v v8, (a0) ; RV64-NEXT: lui a1, %hi(.LCPI58_0) ; RV64-NEXT: addi a1, a1, %lo(.LCPI58_0) -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vlse64.v v9, (a1), zero -; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64-NEXT: vmulh.vv v8, v8, v9 ; RV64-NEXT: vsra.vi v8, v8, 1 ; RV64-NEXT: vsrl.vi v9, v8, 31 @@ -1188,30 +1188,30 @@ define void @mulhs_v2i64(<2 x i64>* %x) { ; RV32-LABEL: mulhs_v2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: lui a1, 349525 ; RV32-NEXT: addi a2, a1, 1365 -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vmv.v.x v9, a2 ; RV32-NEXT: addi a1, a1, 1366 -; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu +; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, ma ; RV32-NEXT: vmv.s.x v9, 
a1 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vmulh.vv v9, v8, v9 -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vid.v v10 ; RV32-NEXT: vsrl.vi v10, v10, 1 ; RV32-NEXT: vrsub.vi v10, v10, 0 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vmadd.vv v10, v8, v9 ; RV32-NEXT: li a1, 1 ; RV32-NEXT: vmv.s.x v8, a1 -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vmv.v.i v9, 0 -; RV32-NEXT: vsetivli zero, 3, e32, m1, tu, mu +; RV32-NEXT: vsetivli zero, 3, e32, m1, tu, ma ; RV32-NEXT: vslideup.vi v9, v8, 2 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vsra.vv v8, v10, v9 ; RV32-NEXT: li a1, 63 ; RV32-NEXT: vsrl.vx v9, v10, a1 @@ -1221,16 +1221,16 @@ ; ; RV64-LABEL: mulhs_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: lui a1, %hi(.LCPI59_0) ; RV64-NEXT: addi a1, a1, %lo(.LCPI59_0) ; RV64-NEXT: vlse64.v v8, (a1), zero ; RV64-NEXT: lui a1, %hi(.LCPI59_1) ; RV64-NEXT: ld a1, %lo(.LCPI59_1)(a1) ; RV64-NEXT: vle64.v v9, (a0) -; RV64-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; RV64-NEXT: vsetvli zero, zero, e64, m1, tu, ma ; RV64-NEXT: vmv.s.x v8, a1 -; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV64-NEXT: vmulh.vv v8, v9, v8 ; RV64-NEXT: vid.v v10 ; RV64-NEXT: vrsub.vi v11, v10, 0 @@ -1250,7 +1250,7 @@ define void @smin_v16i8(<16 x i8>* %x, <16 x i8>* %y) { ; CHECK-LABEL: smin_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vmin.vv v8, v8, v9 @@ -1267,7 +1267,7 @@ define void @smin_v8i16(<8 x i16>* %x, <8 x i16>* %y) { ; CHECK-LABEL: smin_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vmin.vv v8, v8, v9 @@ -1284,7 +1284,7 @@ define void @smin_v4i32(<4 x i32>* %x, <4 x i32>* %y) { ; CHECK-LABEL: smin_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vmin.vv v8, v8, v9 @@ -1301,7 +1301,7 @@ define void @smin_v2i64(<2 x i64>* %x, <2 x i64>* %y) { ; CHECK-LABEL: smin_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vmin.vv v8, v8, v9 @@ -1318,7 +1318,7 @@ define void @smin_vx_v16i8(<16 x i8>* %x, i8 %y) { ; CHECK-LABEL: smin_vx_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmin.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) @@ -1335,7 +1335,7 @@ define void @smin_vx_v8i16(<8 x i16>* %x, i16 %y) { ; CHECK-LABEL: smin_vx_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmin.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) @@ -1352,7 +1352,7 @@ 
define void @smin_vx_v4i32(<4 x i32>* %x, i32 %y) { ; CHECK-LABEL: smin_vx_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmin.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -1369,7 +1369,7 @@ define void @smin_xv_v16i8(<16 x i8>* %x, i8 %y) { ; CHECK-LABEL: smin_xv_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmin.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) @@ -1385,7 +1385,7 @@ define void @smin_xv_v8i16(<8 x i16>* %x, i16 %y) { ; CHECK-LABEL: smin_xv_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmin.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) @@ -1401,7 +1401,7 @@ define void @smin_xv_v4i32(<4 x i32>* %x, i32 %y) { ; CHECK-LABEL: smin_xv_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmin.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -1417,7 +1417,7 @@ define void @smax_v16i8(<16 x i8>* %x, <16 x i8>* %y) { ; CHECK-LABEL: smax_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vmax.vv v8, v8, v9 @@ -1434,7 +1434,7 @@ define void @smax_v8i16(<8 x i16>* %x, <8 x i16>* %y) { ; CHECK-LABEL: smax_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vmax.vv v8, v8, v9 @@ -1451,7 +1451,7 @@ define void @smax_v4i32(<4 x i32>* %x, <4 x i32>* %y) { ; CHECK-LABEL: smax_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vmax.vv v8, v8, v9 @@ -1468,7 +1468,7 @@ define void @smax_v2i64(<2 x i64>* %x, <2 x i64>* %y) { ; CHECK-LABEL: smax_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vmax.vv v8, v8, v9 @@ -1485,7 +1485,7 @@ define void @smax_vx_v16i8(<16 x i8>* %x, i8 %y) { ; CHECK-LABEL: smax_vx_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmax.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) @@ -1502,7 +1502,7 @@ define void @smax_vx_v8i16(<8 x i16>* %x, i16 %y) { ; CHECK-LABEL: smax_vx_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmax.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) @@ -1519,7 +1519,7 @@ define void @smax_vx_v4i32(<4 x i32>* %x, i32 %y) { ; CHECK-LABEL: smax_vx_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmax.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -1536,7 +1536,7 @@ define void @smax_xv_v16i8(<16 x i8>* %x, i8 %y) { ; CHECK-LABEL: smax_xv_v16i8: ; CHECK: # 
%bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmax.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) @@ -1552,7 +1552,7 @@ define void @smax_xv_v8i16(<8 x i16>* %x, i16 %y) { ; CHECK-LABEL: smax_xv_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmax.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) @@ -1568,7 +1568,7 @@ define void @smax_xv_v4i32(<4 x i32>* %x, i32 %y) { ; CHECK-LABEL: smax_xv_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmax.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -1584,7 +1584,7 @@ define void @umin_v16i8(<16 x i8>* %x, <16 x i8>* %y) { ; CHECK-LABEL: umin_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vminu.vv v8, v8, v9 @@ -1601,7 +1601,7 @@ define void @umin_v8i16(<8 x i16>* %x, <8 x i16>* %y) { ; CHECK-LABEL: umin_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vminu.vv v8, v8, v9 @@ -1618,7 +1618,7 @@ define void @umin_v4i32(<4 x i32>* %x, <4 x i32>* %y) { ; CHECK-LABEL: umin_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vminu.vv v8, v8, v9 @@ -1635,7 +1635,7 @@ define void @umin_v2i64(<2 x i64>* %x, <2 x i64>* %y) { ; CHECK-LABEL: umin_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vminu.vv v8, v8, v9 @@ -1652,7 +1652,7 @@ define void @umin_vx_v16i8(<16 x i8>* %x, i8 %y) { ; CHECK-LABEL: umin_vx_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vminu.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) @@ -1669,7 +1669,7 @@ define void @umin_vx_v8i16(<8 x i16>* %x, i16 %y) { ; CHECK-LABEL: umin_vx_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vminu.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) @@ -1686,7 +1686,7 @@ define void @umin_vx_v4i32(<4 x i32>* %x, i32 %y) { ; CHECK-LABEL: umin_vx_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vminu.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -1703,7 +1703,7 @@ define void @umin_xv_v16i8(<16 x i8>* %x, i8 %y) { ; CHECK-LABEL: umin_xv_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vminu.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) @@ -1719,7 +1719,7 @@ define void @umin_xv_v8i16(<8 x i16>* %x, i16 %y) { ; CHECK-LABEL: umin_xv_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 
8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vminu.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) @@ -1735,7 +1735,7 @@ define void @umin_xv_v4i32(<4 x i32>* %x, i32 %y) { ; CHECK-LABEL: umin_xv_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vminu.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -1751,7 +1751,7 @@ define void @umax_v16i8(<16 x i8>* %x, <16 x i8>* %y) { ; CHECK-LABEL: umax_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle8.v v9, (a1) ; CHECK-NEXT: vmaxu.vv v8, v8, v9 @@ -1768,7 +1768,7 @@ define void @umax_v8i16(<8 x i16>* %x, <8 x i16>* %y) { ; CHECK-LABEL: umax_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vmaxu.vv v8, v8, v9 @@ -1785,7 +1785,7 @@ define void @umax_v4i32(<4 x i32>* %x, <4 x i32>* %y) { ; CHECK-LABEL: umax_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vmaxu.vv v8, v8, v9 @@ -1802,7 +1802,7 @@ define void @umax_v2i64(<2 x i64>* %x, <2 x i64>* %y) { ; CHECK-LABEL: umax_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v9, (a1) ; CHECK-NEXT: vmaxu.vv v8, v8, v9 @@ -1819,7 +1819,7 @@ define void @umax_vx_v16i8(<16 x i8>* %x, i8 %y) { ; CHECK-LABEL: umax_vx_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmaxu.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) @@ -1836,7 +1836,7 @@ define void @umax_vx_v8i16(<8 x i16>* %x, i16 %y) { ; CHECK-LABEL: umax_vx_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmaxu.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) @@ -1853,7 +1853,7 @@ define void @umax_vx_v4i32(<4 x i32>* %x, i32 %y) { ; CHECK-LABEL: umax_vx_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmaxu.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -1870,7 +1870,7 @@ define void @umax_xv_v16i8(<16 x i8>* %x, i8 %y) { ; CHECK-LABEL: umax_xv_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmaxu.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) @@ -1886,7 +1886,7 @@ define void @umax_xv_v8i16(<8 x i16>* %x, i16 %y) { ; CHECK-LABEL: umax_xv_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmaxu.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) @@ -1902,7 +1902,7 @@ define void @umax_xv_v4i32(<4 x i32>* %x, i32 %y) { ; CHECK-LABEL: umax_xv_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmaxu.vx v8, v8, 
a1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -1919,7 +1919,7 @@ ; LMULMAX2-LABEL: add_v32i8: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: li a2, 32 -; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vle8.v v10, (a1) ; LMULMAX2-NEXT: vadd.vv v8, v8, v10 @@ -1928,7 +1928,7 @@ ; ; LMULMAX1-RV32-LABEL: add_v32i8: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle8.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle8.v v9, (a2) @@ -1943,7 +1943,7 @@ ; ; LMULMAX1-RV64-LABEL: add_v32i8: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle8.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle8.v v9, (a2) @@ -1965,7 +1965,7 @@ define void @add_v16i16(<16 x i16>* %x, <16 x i16>* %y) { ; LMULMAX2-LABEL: add_v16i16: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-NEXT: vle16.v v8, (a0) ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vadd.vv v8, v8, v10 @@ -1974,7 +1974,7 @@ ; ; LMULMAX1-RV32-LABEL: add_v16i16: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle16.v v9, (a2) @@ -1989,7 +1989,7 @@ ; ; LMULMAX1-RV64-LABEL: add_v16i16: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle16.v v9, (a2) @@ -2011,7 +2011,7 @@ define void @add_v8i32(<8 x i32>* %x, <8 x i32>* %y) { ; LMULMAX2-LABEL: add_v8i32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v8, (a0) ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vadd.vv v8, v8, v10 @@ -2020,7 +2020,7 @@ ; ; LMULMAX1-RV32-LABEL: add_v8i32: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle32.v v9, (a2) @@ -2035,7 +2035,7 @@ ; ; LMULMAX1-RV64-LABEL: add_v8i32: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle32.v v9, (a2) @@ -2057,7 +2057,7 @@ define void @add_v4i64(<4 x i64>* %x, <4 x i64>* %y) { ; LMULMAX2-LABEL: add_v4i64: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-NEXT: vle64.v v8, (a0) ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vadd.vv v8, v8, v10 @@ -2066,7 +2066,7 @@ ; ; LMULMAX1-RV32-LABEL: add_v4i64: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle64.v v9, (a2) @@ 
-2081,7 +2081,7 @@ ; ; LMULMAX1-RV64-LABEL: add_v4i64: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle64.v v9, (a2) @@ -2104,7 +2104,7 @@ ; LMULMAX2-LABEL: sub_v32i8: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: li a2, 32 -; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vle8.v v10, (a1) ; LMULMAX2-NEXT: vsub.vv v8, v8, v10 @@ -2113,7 +2113,7 @@ ; ; LMULMAX1-RV32-LABEL: sub_v32i8: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle8.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle8.v v9, (a2) @@ -2128,7 +2128,7 @@ ; ; LMULMAX1-RV64-LABEL: sub_v32i8: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle8.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle8.v v9, (a2) @@ -2150,7 +2150,7 @@ define void @sub_v16i16(<16 x i16>* %x, <16 x i16>* %y) { ; LMULMAX2-LABEL: sub_v16i16: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-NEXT: vle16.v v8, (a0) ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vsub.vv v8, v8, v10 @@ -2159,7 +2159,7 @@ ; ; LMULMAX1-RV32-LABEL: sub_v16i16: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle16.v v9, (a2) @@ -2174,7 +2174,7 @@ ; ; LMULMAX1-RV64-LABEL: sub_v16i16: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle16.v v9, (a2) @@ -2196,7 +2196,7 @@ define void @sub_v8i32(<8 x i32>* %x, <8 x i32>* %y) { ; LMULMAX2-LABEL: sub_v8i32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v8, (a0) ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vsub.vv v8, v8, v10 @@ -2205,7 +2205,7 @@ ; ; LMULMAX1-RV32-LABEL: sub_v8i32: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle32.v v9, (a2) @@ -2220,7 +2220,7 @@ ; ; LMULMAX1-RV64-LABEL: sub_v8i32: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle32.v v9, (a2) @@ -2242,7 +2242,7 @@ define void @sub_v4i64(<4 x i64>* %x, <4 x i64>* %y) { ; LMULMAX2-LABEL: sub_v4i64: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-NEXT: vle64.v v8, (a0) ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vsub.vv v8, v8, v10 @@ -2251,7 +2251,7 @@ ; ; 
LMULMAX1-RV32-LABEL: sub_v4i64: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle64.v v9, (a2) @@ -2266,7 +2266,7 @@ ; ; LMULMAX1-RV64-LABEL: sub_v4i64: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle64.v v9, (a2) @@ -2289,7 +2289,7 @@ ; LMULMAX2-LABEL: mul_v32i8: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: li a2, 32 -; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vle8.v v10, (a1) ; LMULMAX2-NEXT: vmul.vv v8, v8, v10 @@ -2298,7 +2298,7 @@ ; ; LMULMAX1-RV32-LABEL: mul_v32i8: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle8.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle8.v v9, (a2) @@ -2313,7 +2313,7 @@ ; ; LMULMAX1-RV64-LABEL: mul_v32i8: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle8.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle8.v v9, (a2) @@ -2335,7 +2335,7 @@ define void @mul_v16i16(<16 x i16>* %x, <16 x i16>* %y) { ; LMULMAX2-LABEL: mul_v16i16: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-NEXT: vle16.v v8, (a0) ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vmul.vv v8, v8, v10 @@ -2344,7 +2344,7 @@ ; ; LMULMAX1-RV32-LABEL: mul_v16i16: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle16.v v9, (a2) @@ -2359,7 +2359,7 @@ ; ; LMULMAX1-RV64-LABEL: mul_v16i16: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle16.v v9, (a2) @@ -2381,7 +2381,7 @@ define void @mul_v8i32(<8 x i32>* %x, <8 x i32>* %y) { ; LMULMAX2-LABEL: mul_v8i32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v8, (a0) ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vmul.vv v8, v8, v10 @@ -2390,7 +2390,7 @@ ; ; LMULMAX1-RV32-LABEL: mul_v8i32: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle32.v v9, (a2) @@ -2405,7 +2405,7 @@ ; ; LMULMAX1-RV64-LABEL: mul_v8i32: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle32.v v9, (a2) @@ -2427,7 +2427,7 @@ define void @mul_v4i64(<4 x i64>* %x, <4 x i64>* %y) { ; 
LMULMAX2-LABEL: mul_v4i64: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-NEXT: vle64.v v8, (a0) ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vmul.vv v8, v8, v10 @@ -2436,7 +2436,7 @@ ; ; LMULMAX1-RV32-LABEL: mul_v4i64: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle64.v v9, (a2) @@ -2451,7 +2451,7 @@ ; ; LMULMAX1-RV64-LABEL: mul_v4i64: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle64.v v9, (a2) @@ -2474,7 +2474,7 @@ ; LMULMAX2-LABEL: and_v32i8: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: li a2, 32 -; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vle8.v v10, (a1) ; LMULMAX2-NEXT: vand.vv v8, v8, v10 @@ -2483,7 +2483,7 @@ ; ; LMULMAX1-RV32-LABEL: and_v32i8: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle8.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle8.v v9, (a2) @@ -2498,7 +2498,7 @@ ; ; LMULMAX1-RV64-LABEL: and_v32i8: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle8.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle8.v v9, (a2) @@ -2520,7 +2520,7 @@ define void @and_v16i16(<16 x i16>* %x, <16 x i16>* %y) { ; LMULMAX2-LABEL: and_v16i16: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-NEXT: vle16.v v8, (a0) ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vand.vv v8, v8, v10 @@ -2529,7 +2529,7 @@ ; ; LMULMAX1-RV32-LABEL: and_v16i16: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle16.v v9, (a2) @@ -2544,7 +2544,7 @@ ; ; LMULMAX1-RV64-LABEL: and_v16i16: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle16.v v9, (a2) @@ -2566,7 +2566,7 @@ define void @and_v8i32(<8 x i32>* %x, <8 x i32>* %y) { ; LMULMAX2-LABEL: and_v8i32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v8, (a0) ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vand.vv v8, v8, v10 @@ -2575,7 +2575,7 @@ ; ; LMULMAX1-RV32-LABEL: and_v8i32: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle32.v v9, (a2) @@ -2590,7 +2590,7 @@ ; ; LMULMAX1-RV64-LABEL: and_v8i32: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli 
zero, 4, e32, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle32.v v9, (a2) @@ -2612,7 +2612,7 @@ define void @and_v4i64(<4 x i64>* %x, <4 x i64>* %y) { ; LMULMAX2-LABEL: and_v4i64: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-NEXT: vle64.v v8, (a0) ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vand.vv v8, v8, v10 @@ -2621,7 +2621,7 @@ ; ; LMULMAX1-RV32-LABEL: and_v4i64: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle64.v v9, (a2) @@ -2636,7 +2636,7 @@ ; ; LMULMAX1-RV64-LABEL: and_v4i64: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle64.v v9, (a2) @@ -2659,7 +2659,7 @@ ; LMULMAX2-LABEL: or_v32i8: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: li a2, 32 -; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vle8.v v10, (a1) ; LMULMAX2-NEXT: vor.vv v8, v8, v10 @@ -2668,7 +2668,7 @@ ; ; LMULMAX1-RV32-LABEL: or_v32i8: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle8.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle8.v v9, (a2) @@ -2683,7 +2683,7 @@ ; ; LMULMAX1-RV64-LABEL: or_v32i8: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle8.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle8.v v9, (a2) @@ -2705,7 +2705,7 @@ define void @or_v16i16(<16 x i16>* %x, <16 x i16>* %y) { ; LMULMAX2-LABEL: or_v16i16: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-NEXT: vle16.v v8, (a0) ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vor.vv v8, v8, v10 @@ -2714,7 +2714,7 @@ ; ; LMULMAX1-RV32-LABEL: or_v16i16: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle16.v v9, (a2) @@ -2729,7 +2729,7 @@ ; ; LMULMAX1-RV64-LABEL: or_v16i16: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle16.v v9, (a2) @@ -2751,7 +2751,7 @@ define void @or_v8i32(<8 x i32>* %x, <8 x i32>* %y) { ; LMULMAX2-LABEL: or_v8i32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v8, (a0) ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vor.vv v8, v8, v10 @@ -2760,7 +2760,7 @@ ; ; LMULMAX1-RV32-LABEL: or_v8i32: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: 
vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle32.v v9, (a2) @@ -2775,7 +2775,7 @@ ; ; LMULMAX1-RV64-LABEL: or_v8i32: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle32.v v9, (a2) @@ -2797,7 +2797,7 @@ define void @or_v4i64(<4 x i64>* %x, <4 x i64>* %y) { ; LMULMAX2-LABEL: or_v4i64: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-NEXT: vle64.v v8, (a0) ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vor.vv v8, v8, v10 @@ -2806,7 +2806,7 @@ ; ; LMULMAX1-RV32-LABEL: or_v4i64: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle64.v v9, (a2) @@ -2821,7 +2821,7 @@ ; ; LMULMAX1-RV64-LABEL: or_v4i64: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle64.v v9, (a2) @@ -2844,7 +2844,7 @@ ; LMULMAX2-LABEL: xor_v32i8: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: li a2, 32 -; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vle8.v v10, (a1) ; LMULMAX2-NEXT: vxor.vv v8, v8, v10 @@ -2853,7 +2853,7 @@ ; ; LMULMAX1-RV32-LABEL: xor_v32i8: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle8.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle8.v v9, (a2) @@ -2868,7 +2868,7 @@ ; ; LMULMAX1-RV64-LABEL: xor_v32i8: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle8.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle8.v v9, (a2) @@ -2890,7 +2890,7 @@ define void @xor_v16i16(<16 x i16>* %x, <16 x i16>* %y) { ; LMULMAX2-LABEL: xor_v16i16: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-NEXT: vle16.v v8, (a0) ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vxor.vv v8, v8, v10 @@ -2899,7 +2899,7 @@ ; ; LMULMAX1-RV32-LABEL: xor_v16i16: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle16.v v9, (a2) @@ -2914,7 +2914,7 @@ ; ; LMULMAX1-RV64-LABEL: xor_v16i16: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle16.v v9, (a2) @@ -2936,7 +2936,7 @@ define void @xor_v8i32(<8 x i32>* %x, <8 x i32>* %y) { ; LMULMAX2-LABEL: xor_v8i32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, 
ma ; LMULMAX2-NEXT: vle32.v v8, (a0) ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vxor.vv v8, v8, v10 @@ -2945,7 +2945,7 @@ ; ; LMULMAX1-RV32-LABEL: xor_v8i32: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle32.v v9, (a2) @@ -2960,7 +2960,7 @@ ; ; LMULMAX1-RV64-LABEL: xor_v8i32: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle32.v v9, (a2) @@ -2982,7 +2982,7 @@ define void @xor_v4i64(<4 x i64>* %x, <4 x i64>* %y) { ; LMULMAX2-LABEL: xor_v4i64: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-NEXT: vle64.v v8, (a0) ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vxor.vv v8, v8, v10 @@ -2991,7 +2991,7 @@ ; ; LMULMAX1-RV32-LABEL: xor_v4i64: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle64.v v9, (a2) @@ -3006,7 +3006,7 @@ ; ; LMULMAX1-RV64-LABEL: xor_v4i64: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle64.v v9, (a2) @@ -3029,7 +3029,7 @@ ; LMULMAX2-LABEL: lshr_v32i8: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: li a2, 32 -; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vle8.v v10, (a1) ; LMULMAX2-NEXT: vsrl.vv v8, v8, v10 @@ -3038,7 +3038,7 @@ ; ; LMULMAX1-RV32-LABEL: lshr_v32i8: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle8.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle8.v v9, (a2) @@ -3053,7 +3053,7 @@ ; ; LMULMAX1-RV64-LABEL: lshr_v32i8: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle8.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle8.v v9, (a2) @@ -3075,7 +3075,7 @@ define void @lshr_v16i16(<16 x i16>* %x, <16 x i16>* %y) { ; LMULMAX2-LABEL: lshr_v16i16: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-NEXT: vle16.v v8, (a0) ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vsrl.vv v8, v8, v10 @@ -3084,7 +3084,7 @@ ; ; LMULMAX1-RV32-LABEL: lshr_v16i16: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle16.v v9, (a2) @@ -3099,7 +3099,7 @@ ; ; LMULMAX1-RV64-LABEL: lshr_v16i16: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle16.v v8, (a0) ; 
LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle16.v v9, (a2) @@ -3121,7 +3121,7 @@ define void @lshr_v8i32(<8 x i32>* %x, <8 x i32>* %y) { ; LMULMAX2-LABEL: lshr_v8i32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v8, (a0) ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vsrl.vv v8, v8, v10 @@ -3130,7 +3130,7 @@ ; ; LMULMAX1-RV32-LABEL: lshr_v8i32: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle32.v v9, (a2) @@ -3145,7 +3145,7 @@ ; ; LMULMAX1-RV64-LABEL: lshr_v8i32: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle32.v v9, (a2) @@ -3167,7 +3167,7 @@ define void @lshr_v4i64(<4 x i64>* %x, <4 x i64>* %y) { ; LMULMAX2-LABEL: lshr_v4i64: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-NEXT: vle64.v v8, (a0) ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vsrl.vv v8, v8, v10 @@ -3176,7 +3176,7 @@ ; ; LMULMAX1-RV32-LABEL: lshr_v4i64: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle64.v v9, (a2) @@ -3191,7 +3191,7 @@ ; ; LMULMAX1-RV64-LABEL: lshr_v4i64: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle64.v v9, (a2) @@ -3214,7 +3214,7 @@ ; LMULMAX2-LABEL: ashr_v32i8: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: li a2, 32 -; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vle8.v v10, (a1) ; LMULMAX2-NEXT: vsra.vv v8, v8, v10 @@ -3223,7 +3223,7 @@ ; ; LMULMAX1-RV32-LABEL: ashr_v32i8: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle8.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle8.v v9, (a2) @@ -3238,7 +3238,7 @@ ; ; LMULMAX1-RV64-LABEL: ashr_v32i8: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle8.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle8.v v9, (a2) @@ -3260,7 +3260,7 @@ define void @ashr_v16i16(<16 x i16>* %x, <16 x i16>* %y) { ; LMULMAX2-LABEL: ashr_v16i16: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-NEXT: vle16.v v8, (a0) ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vsra.vv v8, v8, v10 @@ -3269,7 +3269,7 @@ ; ; LMULMAX1-RV32-LABEL: ashr_v16i16: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV32-NEXT: 
addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle16.v v9, (a2) @@ -3284,7 +3284,7 @@ ; ; LMULMAX1-RV64-LABEL: ashr_v16i16: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle16.v v9, (a2) @@ -3306,7 +3306,7 @@ define void @ashr_v8i32(<8 x i32>* %x, <8 x i32>* %y) { ; LMULMAX2-LABEL: ashr_v8i32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v8, (a0) ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vsra.vv v8, v8, v10 @@ -3315,7 +3315,7 @@ ; ; LMULMAX1-RV32-LABEL: ashr_v8i32: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle32.v v9, (a2) @@ -3330,7 +3330,7 @@ ; ; LMULMAX1-RV64-LABEL: ashr_v8i32: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle32.v v9, (a2) @@ -3352,7 +3352,7 @@ define void @ashr_v4i64(<4 x i64>* %x, <4 x i64>* %y) { ; LMULMAX2-LABEL: ashr_v4i64: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-NEXT: vle64.v v8, (a0) ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vsra.vv v8, v8, v10 @@ -3361,7 +3361,7 @@ ; ; LMULMAX1-RV32-LABEL: ashr_v4i64: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle64.v v9, (a2) @@ -3376,7 +3376,7 @@ ; ; LMULMAX1-RV64-LABEL: ashr_v4i64: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle64.v v9, (a2) @@ -3399,7 +3399,7 @@ ; LMULMAX2-LABEL: shl_v32i8: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: li a2, 32 -; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vle8.v v10, (a1) ; LMULMAX2-NEXT: vsll.vv v8, v8, v10 @@ -3408,7 +3408,7 @@ ; ; LMULMAX1-RV32-LABEL: shl_v32i8: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle8.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle8.v v9, (a2) @@ -3423,7 +3423,7 @@ ; ; LMULMAX1-RV64-LABEL: shl_v32i8: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle8.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle8.v v9, (a2) @@ -3445,7 +3445,7 @@ define void @shl_v16i16(<16 x i16>* %x, <16 x i16>* %y) { ; LMULMAX2-LABEL: shl_v16i16: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-NEXT: vle16.v v8, (a0) ; LMULMAX2-NEXT: vle16.v v10, (a1) ; 
LMULMAX2-NEXT: vsll.vv v8, v8, v10 @@ -3454,7 +3454,7 @@ ; ; LMULMAX1-RV32-LABEL: shl_v16i16: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle16.v v9, (a2) @@ -3469,7 +3469,7 @@ ; ; LMULMAX1-RV64-LABEL: shl_v16i16: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle16.v v9, (a2) @@ -3491,7 +3491,7 @@ define void @shl_v8i32(<8 x i32>* %x, <8 x i32>* %y) { ; LMULMAX2-LABEL: shl_v8i32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v8, (a0) ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vsll.vv v8, v8, v10 @@ -3500,7 +3500,7 @@ ; ; LMULMAX1-RV32-LABEL: shl_v8i32: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle32.v v9, (a2) @@ -3515,7 +3515,7 @@ ; ; LMULMAX1-RV64-LABEL: shl_v8i32: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle32.v v9, (a2) @@ -3537,7 +3537,7 @@ define void @shl_v4i64(<4 x i64>* %x, <4 x i64>* %y) { ; LMULMAX2-LABEL: shl_v4i64: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-NEXT: vle64.v v8, (a0) ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vsll.vv v8, v8, v10 @@ -3546,7 +3546,7 @@ ; ; LMULMAX1-RV32-LABEL: shl_v4i64: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle64.v v9, (a2) @@ -3561,7 +3561,7 @@ ; ; LMULMAX1-RV64-LABEL: shl_v4i64: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle64.v v9, (a2) @@ -3584,7 +3584,7 @@ ; LMULMAX2-LABEL: sdiv_v32i8: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: li a2, 32 -; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vle8.v v10, (a1) ; LMULMAX2-NEXT: vdiv.vv v8, v8, v10 @@ -3593,7 +3593,7 @@ ; ; LMULMAX1-RV32-LABEL: sdiv_v32i8: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle8.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle8.v v9, (a2) @@ -3608,7 +3608,7 @@ ; ; LMULMAX1-RV64-LABEL: sdiv_v32i8: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle8.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle8.v v9, (a2) @@ -3630,7 +3630,7 @@ 
define void @sdiv_v16i16(<16 x i16>* %x, <16 x i16>* %y) { ; LMULMAX2-LABEL: sdiv_v16i16: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-NEXT: vle16.v v8, (a0) ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vdiv.vv v8, v8, v10 @@ -3639,7 +3639,7 @@ ; ; LMULMAX1-RV32-LABEL: sdiv_v16i16: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle16.v v9, (a2) @@ -3654,7 +3654,7 @@ ; ; LMULMAX1-RV64-LABEL: sdiv_v16i16: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle16.v v9, (a2) @@ -3676,7 +3676,7 @@ define void @sdiv_v8i32(<8 x i32>* %x, <8 x i32>* %y) { ; LMULMAX2-LABEL: sdiv_v8i32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v8, (a0) ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vdiv.vv v8, v8, v10 @@ -3685,7 +3685,7 @@ ; ; LMULMAX1-RV32-LABEL: sdiv_v8i32: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle32.v v9, (a2) @@ -3700,7 +3700,7 @@ ; ; LMULMAX1-RV64-LABEL: sdiv_v8i32: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle32.v v9, (a2) @@ -3722,7 +3722,7 @@ define void @sdiv_v4i64(<4 x i64>* %x, <4 x i64>* %y) { ; LMULMAX2-LABEL: sdiv_v4i64: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-NEXT: vle64.v v8, (a0) ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vdiv.vv v8, v8, v10 @@ -3731,7 +3731,7 @@ ; ; LMULMAX1-RV32-LABEL: sdiv_v4i64: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle64.v v9, (a2) @@ -3746,7 +3746,7 @@ ; ; LMULMAX1-RV64-LABEL: sdiv_v4i64: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle64.v v9, (a2) @@ -3769,7 +3769,7 @@ ; LMULMAX2-LABEL: srem_v32i8: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: li a2, 32 -; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vle8.v v10, (a1) ; LMULMAX2-NEXT: vrem.vv v8, v8, v10 @@ -3778,7 +3778,7 @@ ; ; LMULMAX1-RV32-LABEL: srem_v32i8: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle8.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle8.v v9, (a2) @@ -3793,7 +3793,7 @@ ; ; 
LMULMAX1-RV64-LABEL: srem_v32i8: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle8.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle8.v v9, (a2) @@ -3815,7 +3815,7 @@ define void @srem_v16i16(<16 x i16>* %x, <16 x i16>* %y) { ; LMULMAX2-LABEL: srem_v16i16: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-NEXT: vle16.v v8, (a0) ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vrem.vv v8, v8, v10 @@ -3824,7 +3824,7 @@ ; ; LMULMAX1-RV32-LABEL: srem_v16i16: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle16.v v9, (a2) @@ -3839,7 +3839,7 @@ ; ; LMULMAX1-RV64-LABEL: srem_v16i16: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle16.v v9, (a2) @@ -3861,7 +3861,7 @@ define void @srem_v8i32(<8 x i32>* %x, <8 x i32>* %y) { ; LMULMAX2-LABEL: srem_v8i32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v8, (a0) ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vrem.vv v8, v8, v10 @@ -3870,7 +3870,7 @@ ; ; LMULMAX1-RV32-LABEL: srem_v8i32: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle32.v v9, (a2) @@ -3885,7 +3885,7 @@ ; ; LMULMAX1-RV64-LABEL: srem_v8i32: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle32.v v9, (a2) @@ -3907,7 +3907,7 @@ define void @srem_v4i64(<4 x i64>* %x, <4 x i64>* %y) { ; LMULMAX2-LABEL: srem_v4i64: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-NEXT: vle64.v v8, (a0) ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vrem.vv v8, v8, v10 @@ -3916,7 +3916,7 @@ ; ; LMULMAX1-RV32-LABEL: srem_v4i64: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle64.v v9, (a2) @@ -3931,7 +3931,7 @@ ; ; LMULMAX1-RV64-LABEL: srem_v4i64: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle64.v v9, (a2) @@ -3954,7 +3954,7 @@ ; LMULMAX2-LABEL: udiv_v32i8: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: li a2, 32 -; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vle8.v v10, (a1) ; LMULMAX2-NEXT: vdivu.vv v8, v8, v10 @@ -3963,7 +3963,7 @@ ; ; 
LMULMAX1-RV32-LABEL: udiv_v32i8: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle8.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle8.v v9, (a2) @@ -3978,7 +3978,7 @@ ; ; LMULMAX1-RV64-LABEL: udiv_v32i8: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle8.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle8.v v9, (a2) @@ -4000,7 +4000,7 @@ define void @udiv_v16i16(<16 x i16>* %x, <16 x i16>* %y) { ; LMULMAX2-LABEL: udiv_v16i16: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-NEXT: vle16.v v8, (a0) ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vdivu.vv v8, v8, v10 @@ -4009,7 +4009,7 @@ ; ; LMULMAX1-RV32-LABEL: udiv_v16i16: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle16.v v9, (a2) @@ -4024,7 +4024,7 @@ ; ; LMULMAX1-RV64-LABEL: udiv_v16i16: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle16.v v9, (a2) @@ -4046,7 +4046,7 @@ define void @udiv_v8i32(<8 x i32>* %x, <8 x i32>* %y) { ; LMULMAX2-LABEL: udiv_v8i32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v8, (a0) ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vdivu.vv v8, v8, v10 @@ -4055,7 +4055,7 @@ ; ; LMULMAX1-RV32-LABEL: udiv_v8i32: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle32.v v9, (a2) @@ -4070,7 +4070,7 @@ ; ; LMULMAX1-RV64-LABEL: udiv_v8i32: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle32.v v9, (a2) @@ -4092,7 +4092,7 @@ define void @udiv_v4i64(<4 x i64>* %x, <4 x i64>* %y) { ; LMULMAX2-LABEL: udiv_v4i64: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-NEXT: vle64.v v8, (a0) ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vdivu.vv v8, v8, v10 @@ -4101,7 +4101,7 @@ ; ; LMULMAX1-RV32-LABEL: udiv_v4i64: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle64.v v9, (a2) @@ -4116,7 +4116,7 @@ ; ; LMULMAX1-RV64-LABEL: udiv_v4i64: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle64.v v9, (a2) @@ -4139,7 +4139,7 @@ ; 
LMULMAX2-LABEL: urem_v32i8: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: li a2, 32 -; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vle8.v v10, (a1) ; LMULMAX2-NEXT: vremu.vv v8, v8, v10 @@ -4148,7 +4148,7 @@ ; ; LMULMAX1-RV32-LABEL: urem_v32i8: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle8.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle8.v v9, (a2) @@ -4163,7 +4163,7 @@ ; ; LMULMAX1-RV64-LABEL: urem_v32i8: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle8.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle8.v v9, (a2) @@ -4185,7 +4185,7 @@ define void @urem_v16i16(<16 x i16>* %x, <16 x i16>* %y) { ; LMULMAX2-LABEL: urem_v16i16: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-NEXT: vle16.v v8, (a0) ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vremu.vv v8, v8, v10 @@ -4194,7 +4194,7 @@ ; ; LMULMAX1-RV32-LABEL: urem_v16i16: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle16.v v9, (a2) @@ -4209,7 +4209,7 @@ ; ; LMULMAX1-RV64-LABEL: urem_v16i16: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle16.v v9, (a2) @@ -4231,7 +4231,7 @@ define void @urem_v8i32(<8 x i32>* %x, <8 x i32>* %y) { ; LMULMAX2-LABEL: urem_v8i32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v8, (a0) ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vremu.vv v8, v8, v10 @@ -4240,7 +4240,7 @@ ; ; LMULMAX1-RV32-LABEL: urem_v8i32: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle32.v v9, (a2) @@ -4255,7 +4255,7 @@ ; ; LMULMAX1-RV64-LABEL: urem_v8i32: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle32.v v9, (a2) @@ -4277,7 +4277,7 @@ define void @urem_v4i64(<4 x i64>* %x, <4 x i64>* %y) { ; LMULMAX2-LABEL: urem_v4i64: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-NEXT: vle64.v v8, (a0) ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vremu.vv v8, v8, v10 @@ -4286,7 +4286,7 @@ ; ; LMULMAX1-RV32-LABEL: urem_v4i64: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle64.v v9, (a2) @@ -4301,7 +4301,7 @@ ; ; 
LMULMAX1-RV64-LABEL: urem_v4i64: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle64.v v9, (a2) @@ -4323,7 +4323,7 @@ define void @extract_v4i64(<4 x i64>* %x, <4 x i64>* %y) { ; LMULMAX2-LABEL: extract_v4i64: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-NEXT: vle64.v v8, (a0) ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vadd.vv v8, v8, v10 @@ -4332,7 +4332,7 @@ ; ; LMULMAX1-LABEL: extract_v4i64: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vle64.v v8, (a0) ; LMULMAX1-NEXT: addi a2, a0, 16 ; LMULMAX1-NEXT: vle64.v v9, (a2) @@ -4357,13 +4357,13 @@ ; LMULMAX2-RV32-LABEL: mulhu_v32i8: ; LMULMAX2-RV32: # %bb.0: ; LMULMAX2-RV32-NEXT: li a1, 32 -; LMULMAX2-RV32-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; LMULMAX2-RV32-NEXT: vle8.v v8, (a0) ; LMULMAX2-RV32-NEXT: lui a2, 66049 ; LMULMAX2-RV32-NEXT: addi a2, a2, 32 -; LMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.s.x v0, a2 -; LMULMAX2-RV32-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; LMULMAX2-RV32-NEXT: lui a2, %hi(.LCPI153_0) ; LMULMAX2-RV32-NEXT: addi a2, a2, %lo(.LCPI153_0) ; LMULMAX2-RV32-NEXT: vle8.v v10, (a2) @@ -4374,31 +4374,31 @@ ; LMULMAX2-RV32-NEXT: vsub.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: lui a2, 163907 ; LMULMAX2-RV32-NEXT: addi a2, a2, -2044 -; LMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.s.x v0, a2 ; LMULMAX2-RV32-NEXT: li a2, -128 -; LMULMAX2-RV32-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmerge.vxm v12, v12, a2, v0 ; LMULMAX2-RV32-NEXT: vmulhu.vv v8, v8, v12 ; LMULMAX2-RV32-NEXT: vadd.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: lui a2, 8208 ; LMULMAX2-RV32-NEXT: addi a2, a2, 513 -; LMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.s.x v0, a2 -; LMULMAX2-RV32-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.i v10, 4 ; LMULMAX2-RV32-NEXT: vmerge.vim v10, v10, 1, v0 ; LMULMAX2-RV32-NEXT: lui a2, 66785 ; LMULMAX2-RV32-NEXT: addi a2, a2, 78 -; LMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.s.x v0, a2 -; LMULMAX2-RV32-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmerge.vim v10, v10, 3, v0 ; LMULMAX2-RV32-NEXT: lui a2, 529160 ; LMULMAX2-RV32-NEXT: addi a2, a2, 304 -; LMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.s.x v0, a2 -; LMULMAX2-RV32-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmerge.vim v10, v10, 2, v0 ; LMULMAX2-RV32-NEXT: vsrl.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: vse8.v v8, (a0) @@ -4407,13 +4407,13 @@ ; 
LMULMAX2-RV64-LABEL: mulhu_v32i8: ; LMULMAX2-RV64: # %bb.0: ; LMULMAX2-RV64-NEXT: li a1, 32 -; LMULMAX2-RV64-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; LMULMAX2-RV64-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; LMULMAX2-RV64-NEXT: vle8.v v8, (a0) ; LMULMAX2-RV64-NEXT: lui a2, 66049 ; LMULMAX2-RV64-NEXT: addiw a2, a2, 32 -; LMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; LMULMAX2-RV64-NEXT: vmv.s.x v0, a2 -; LMULMAX2-RV64-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; LMULMAX2-RV64-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; LMULMAX2-RV64-NEXT: lui a2, %hi(.LCPI153_0) ; LMULMAX2-RV64-NEXT: addi a2, a2, %lo(.LCPI153_0) ; LMULMAX2-RV64-NEXT: vle8.v v10, (a2) @@ -4424,31 +4424,31 @@ ; LMULMAX2-RV64-NEXT: vsub.vv v8, v8, v10 ; LMULMAX2-RV64-NEXT: lui a2, 163907 ; LMULMAX2-RV64-NEXT: addiw a2, a2, -2044 -; LMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; LMULMAX2-RV64-NEXT: vmv.s.x v0, a2 ; LMULMAX2-RV64-NEXT: li a2, -128 -; LMULMAX2-RV64-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; LMULMAX2-RV64-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; LMULMAX2-RV64-NEXT: vmerge.vxm v12, v12, a2, v0 ; LMULMAX2-RV64-NEXT: vmulhu.vv v8, v8, v12 ; LMULMAX2-RV64-NEXT: vadd.vv v8, v8, v10 ; LMULMAX2-RV64-NEXT: lui a2, 8208 ; LMULMAX2-RV64-NEXT: addiw a2, a2, 513 -; LMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; LMULMAX2-RV64-NEXT: vmv.s.x v0, a2 -; LMULMAX2-RV64-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; LMULMAX2-RV64-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; LMULMAX2-RV64-NEXT: vmv.v.i v10, 4 ; LMULMAX2-RV64-NEXT: vmerge.vim v10, v10, 1, v0 ; LMULMAX2-RV64-NEXT: lui a2, 66785 ; LMULMAX2-RV64-NEXT: addiw a2, a2, 78 -; LMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; LMULMAX2-RV64-NEXT: vmv.s.x v0, a2 -; LMULMAX2-RV64-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; LMULMAX2-RV64-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; LMULMAX2-RV64-NEXT: vmerge.vim v10, v10, 3, v0 ; LMULMAX2-RV64-NEXT: lui a2, 529160 ; LMULMAX2-RV64-NEXT: addiw a2, a2, 304 -; LMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; LMULMAX2-RV64-NEXT: vmv.s.x v0, a2 -; LMULMAX2-RV64-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; LMULMAX2-RV64-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; LMULMAX2-RV64-NEXT: vmerge.vim v10, v10, 2, v0 ; LMULMAX2-RV64-NEXT: vsrl.vv v8, v8, v10 ; LMULMAX2-RV64-NEXT: vse8.v v8, (a0) @@ -4456,7 +4456,7 @@ ; ; LMULMAX1-LABEL: mulhu_v32i8: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vle8.v v8, (a1) ; LMULMAX1-NEXT: lui a2, %hi(.LCPI153_0) @@ -4477,7 +4477,7 @@ define void @mulhu_v16i16(<16 x i16>* %x) { ; LMULMAX2-RV32-LABEL: mulhu_v16i16: ; LMULMAX2-RV32: # %bb.0: -; LMULMAX2-RV32-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-RV32-NEXT: vle16.v v10, (a0) ; LMULMAX2-RV32-NEXT: lui a1, 2 ; LMULMAX2-RV32-NEXT: addi a1, a1, 289 @@ -4510,7 +4510,7 @@ ; ; LMULMAX2-RV64-LABEL: mulhu_v16i16: ; LMULMAX2-RV64: # %bb.0: -; LMULMAX2-RV64-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-RV64-NEXT: vle16.v v10, (a0) ; LMULMAX2-RV64-NEXT: lui a1, 2 ; LMULMAX2-RV64-NEXT: addiw a1, a1, 289 @@ -4543,7 
+4543,7 @@ ; ; LMULMAX1-LABEL: mulhu_v16i16: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vle16.v v8, (a1) ; LMULMAX1-NEXT: lui a2, %hi(.LCPI154_0) @@ -4564,7 +4564,7 @@ define void @mulhu_v8i32(<8 x i32>* %x) { ; LMULMAX2-LABEL: mulhu_v8i32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v8, (a0) ; LMULMAX2-NEXT: li a1, 68 ; LMULMAX2-NEXT: vmv.s.x v0, a1 @@ -4588,18 +4588,18 @@ ; ; LMULMAX1-RV32-LABEL: mulhu_v8i32: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 ; LMULMAX1-RV32-NEXT: vle32.v v9, (a1) ; LMULMAX1-RV32-NEXT: lui a2, 524288 ; LMULMAX1-RV32-NEXT: vmv.s.x v10, a2 ; LMULMAX1-RV32-NEXT: vmv.v.i v11, 0 -; LMULMAX1-RV32-NEXT: vsetivli zero, 3, e32, m1, tu, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 3, e32, m1, tu, ma ; LMULMAX1-RV32-NEXT: vslideup.vi v11, v10, 2 ; LMULMAX1-RV32-NEXT: lui a2, %hi(.LCPI155_0) ; LMULMAX1-RV32-NEXT: addi a2, a2, %lo(.LCPI155_0) -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle32.v v10, (a2) ; LMULMAX1-RV32-NEXT: vmulhu.vv v12, v9, v10 ; LMULMAX1-RV32-NEXT: vsub.vv v9, v9, v12 @@ -4608,9 +4608,9 @@ ; LMULMAX1-RV32-NEXT: li a2, 1 ; LMULMAX1-RV32-NEXT: vmv.s.x v12, a2 ; LMULMAX1-RV32-NEXT: vmv.v.i v13, 2 -; LMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu +; LMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, tu, ma ; LMULMAX1-RV32-NEXT: vslideup.vi v13, v12, 3 -; LMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vsrl.vv v9, v9, v13 ; LMULMAX1-RV32-NEXT: vmulhu.vv v10, v8, v10 ; LMULMAX1-RV32-NEXT: vsub.vv v8, v8, v10 @@ -4623,7 +4623,7 @@ ; ; LMULMAX1-RV64-LABEL: mulhu_v8i32: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV64-NEXT: addi a1, a0, 16 ; LMULMAX1-RV64-NEXT: vle32.v v8, (a1) ; LMULMAX1-RV64-NEXT: lui a2, %hi(.LCPI155_0) @@ -4644,46 +4644,46 @@ define void @mulhu_v4i64(<4 x i64>* %x) { ; LMULMAX2-RV32-LABEL: mulhu_v4i64: ; LMULMAX2-RV32: # %bb.0: -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX2-RV32-NEXT: lui a1, %hi(.LCPI156_0) ; LMULMAX2-RV32-NEXT: addi a1, a1, %lo(.LCPI156_0) -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vle32.v v10, (a1) -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmulhu.vv v10, v8, v10 ; LMULMAX2-RV32-NEXT: vsub.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: lui a1, 524288 ; LMULMAX2-RV32-NEXT: vmv.s.x v12, a1 -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.i v14, 0 -; LMULMAX2-RV32-NEXT: vsetivli zero, 6, e32, m2, tu, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 6, e32, m2, tu, ma ; LMULMAX2-RV32-NEXT: vslideup.vi v14, v12, 5 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, 
mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmulhu.vv v8, v8, v14 ; LMULMAX2-RV32-NEXT: vadd.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: lui a1, %hi(.LCPI156_1) ; LMULMAX2-RV32-NEXT: addi a1, a1, %lo(.LCPI156_1) -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vle32.v v10, (a1) -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vsrl.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX2-RV32-NEXT: ret ; ; LMULMAX2-RV64-LABEL: mulhu_v4i64: ; LMULMAX2-RV64: # %bb.0: -; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX2-RV64-NEXT: li a1, -1 ; LMULMAX2-RV64-NEXT: slli a1, a1, 63 ; LMULMAX2-RV64-NEXT: vmv.s.x v10, a1 ; LMULMAX2-RV64-NEXT: vmv.v.i v12, 0 -; LMULMAX2-RV64-NEXT: vsetivli zero, 3, e64, m2, tu, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 3, e64, m2, tu, ma ; LMULMAX2-RV64-NEXT: vslideup.vi v12, v10, 2 ; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI156_0) ; LMULMAX2-RV64-NEXT: addi a1, a1, %lo(.LCPI156_0) -; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV64-NEXT: vle64.v v10, (a1) ; LMULMAX2-RV64-NEXT: vmulhu.vv v10, v8, v10 ; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI156_1) @@ -4698,21 +4698,21 @@ ; ; LMULMAX1-RV32-LABEL: mulhu_v4i64: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 ; LMULMAX1-RV32-NEXT: vle64.v v9, (a1) ; LMULMAX1-RV32-NEXT: lui a2, %hi(.LCPI156_0) ; LMULMAX1-RV32-NEXT: addi a2, a2, %lo(.LCPI156_0) -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle32.v v10, (a2) -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vdivu.vv v9, v9, v10 ; LMULMAX1-RV32-NEXT: lui a2, %hi(.LCPI156_1) ; LMULMAX1-RV32-NEXT: addi a2, a2, %lo(.LCPI156_1) -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle32.v v10, (a2) -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vdivu.vv v8, v8, v10 ; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) ; LMULMAX1-RV32-NEXT: vse64.v v9, (a1) @@ -4720,24 +4720,24 @@ ; ; LMULMAX1-RV64-LABEL: mulhu_v4i64: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a1, a0, 16 ; LMULMAX1-RV64-NEXT: vle64.v v9, (a1) ; LMULMAX1-RV64-NEXT: vmv.v.i v10, 0 ; LMULMAX1-RV64-NEXT: li a2, -1 ; LMULMAX1-RV64-NEXT: slli a2, a2, 63 -; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, tu, ma ; LMULMAX1-RV64-NEXT: vmv.s.x v10, a2 ; LMULMAX1-RV64-NEXT: lui a2, %hi(.LCPI156_0) ; LMULMAX1-RV64-NEXT: addi a2, a2, %lo(.LCPI156_0) -; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: vlse64.v v11, (a2), zero ; 
LMULMAX1-RV64-NEXT: lui a2, %hi(.LCPI156_1) ; LMULMAX1-RV64-NEXT: ld a2, %lo(.LCPI156_1)(a2) -; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, tu, ma ; LMULMAX1-RV64-NEXT: vmv.s.x v11, a2 -; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: vmulhu.vv v11, v9, v11 ; LMULMAX1-RV64-NEXT: vsub.vv v9, v9, v11 ; LMULMAX1-RV64-NEXT: vmulhu.vv v9, v9, v10 @@ -4750,9 +4750,9 @@ ; LMULMAX1-RV64-NEXT: ld a2, %lo(.LCPI156_3)(a2) ; LMULMAX1-RV64-NEXT: vadd.vi v12, v10, 2 ; LMULMAX1-RV64-NEXT: vsrl.vv v9, v9, v12 -; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, tu, ma ; LMULMAX1-RV64-NEXT: vmv.s.x v11, a2 -; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: vmulhu.vv v8, v8, v11 ; LMULMAX1-RV64-NEXT: vadd.vi v10, v10, 1 ; LMULMAX1-RV64-NEXT: vsrl.vv v8, v8, v10 @@ -4769,16 +4769,16 @@ ; LMULMAX2-RV32-LABEL: mulhs_v32i8: ; LMULMAX2-RV32: # %bb.0: ; LMULMAX2-RV32-NEXT: li a1, 32 -; LMULMAX2-RV32-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; LMULMAX2-RV32-NEXT: vle8.v v8, (a0) ; LMULMAX2-RV32-NEXT: li a2, -123 ; LMULMAX2-RV32-NEXT: vmv.v.x v10, a2 ; LMULMAX2-RV32-NEXT: lui a2, 304453 ; LMULMAX2-RV32-NEXT: addi a2, a2, -1452 -; LMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.s.x v0, a2 ; LMULMAX2-RV32-NEXT: li a2, 57 -; LMULMAX2-RV32-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmerge.vxm v10, v10, a2, v0 ; LMULMAX2-RV32-NEXT: vmulhu.vv v8, v8, v10 ; LMULMAX2-RV32-NEXT: vmv.v.i v10, 7 @@ -4790,16 +4790,16 @@ ; LMULMAX2-RV64-LABEL: mulhs_v32i8: ; LMULMAX2-RV64: # %bb.0: ; LMULMAX2-RV64-NEXT: li a1, 32 -; LMULMAX2-RV64-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; LMULMAX2-RV64-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; LMULMAX2-RV64-NEXT: vle8.v v8, (a0) ; LMULMAX2-RV64-NEXT: li a2, -123 ; LMULMAX2-RV64-NEXT: vmv.v.x v10, a2 ; LMULMAX2-RV64-NEXT: lui a2, 304453 ; LMULMAX2-RV64-NEXT: addiw a2, a2, -1452 -; LMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; LMULMAX2-RV64-NEXT: vmv.s.x v0, a2 ; LMULMAX2-RV64-NEXT: li a2, 57 -; LMULMAX2-RV64-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; LMULMAX2-RV64-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; LMULMAX2-RV64-NEXT: vmerge.vxm v10, v10, a2, v0 ; LMULMAX2-RV64-NEXT: vmulhu.vv v8, v8, v10 ; LMULMAX2-RV64-NEXT: vmv.v.i v10, 7 @@ -4810,15 +4810,15 @@ ; ; LMULMAX1-RV32-LABEL: mulhs_v32i8: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle8.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 ; LMULMAX1-RV32-NEXT: vle8.v v9, (a1) ; LMULMAX1-RV32-NEXT: lui a2, 5 ; LMULMAX1-RV32-NEXT: addi a2, a2, -1452 -; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; LMULMAX1-RV32-NEXT: vmv.s.x v0, a2 -; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.i v10, -9 ; LMULMAX1-RV32-NEXT: vmerge.vim v10, v10, 9, v0 ; LMULMAX1-RV32-NEXT: vdivu.vv v9, v9, v10 @@ -4829,15 +4829,15 
@@ ; ; LMULMAX1-RV64-LABEL: mulhs_v32i8: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle8.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a1, a0, 16 ; LMULMAX1-RV64-NEXT: vle8.v v9, (a1) ; LMULMAX1-RV64-NEXT: lui a2, 5 ; LMULMAX1-RV64-NEXT: addiw a2, a2, -1452 -; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; LMULMAX1-RV64-NEXT: vmv.s.x v0, a2 -; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV64-NEXT: vmv.v.i v10, -9 ; LMULMAX1-RV64-NEXT: vmerge.vim v10, v10, 9, v0 ; LMULMAX1-RV64-NEXT: vdivu.vv v9, v9, v10 @@ -4854,7 +4854,7 @@ define void @mulhs_v16i16(<16 x i16>* %x) { ; LMULMAX2-RV32-LABEL: mulhs_v16i16: ; LMULMAX2-RV32: # %bb.0: -; LMULMAX2-RV32-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-RV32-NEXT: vle16.v v8, (a0) ; LMULMAX2-RV32-NEXT: lui a1, 7 ; LMULMAX2-RV32-NEXT: addi a1, a1, -1687 @@ -4874,7 +4874,7 @@ ; ; LMULMAX2-RV64-LABEL: mulhs_v16i16: ; LMULMAX2-RV64: # %bb.0: -; LMULMAX2-RV64-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-RV64-NEXT: vle16.v v8, (a0) ; LMULMAX2-RV64-NEXT: lui a1, 7 ; LMULMAX2-RV64-NEXT: addiw a1, a1, -1687 @@ -4894,7 +4894,7 @@ ; ; LMULMAX1-LABEL: mulhs_v16i16: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-NEXT: vle16.v v8, (a0) ; LMULMAX1-NEXT: addi a1, a0, 16 ; LMULMAX1-NEXT: vle16.v v9, (a1) @@ -4916,7 +4916,7 @@ define void @mulhs_v8i32(<8 x i32>* %x) { ; LMULMAX2-RV32-LABEL: mulhs_v8i32: ; LMULMAX2-RV32: # %bb.0: -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX2-RV32-NEXT: li a1, 85 ; LMULMAX2-RV32-NEXT: vmv.s.x v0, a1 @@ -4935,13 +4935,13 @@ ; ; LMULMAX2-RV64-LABEL: mulhs_v8i32: ; LMULMAX2-RV64: # %bb.0: -; LMULMAX2-RV64-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV64-NEXT: vle32.v v8, (a0) ; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI159_0) ; LMULMAX2-RV64-NEXT: addi a1, a1, %lo(.LCPI159_0) -; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV64-NEXT: vlse64.v v10, (a1), zero -; LMULMAX2-RV64-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV64-NEXT: vmulh.vv v8, v8, v10 ; LMULMAX2-RV64-NEXT: vsra.vi v8, v8, 1 ; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 31 @@ -4951,7 +4951,7 @@ ; ; LMULMAX1-RV32-LABEL: mulhs_v8i32: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 ; LMULMAX1-RV32-NEXT: vle32.v v9, (a1) @@ -4977,16 +4977,16 @@ ; ; LMULMAX1-RV64-LABEL: mulhs_v8i32: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a1, a0, 16 ; LMULMAX1-RV64-NEXT: vle32.v v9, (a1) ; LMULMAX1-RV64-NEXT: li a2, 3 ; LMULMAX1-RV64-NEXT: slli a2, a2, 33 ; LMULMAX1-RV64-NEXT: addi a2, a2, -5 -; 
LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: vmv.v.x v10, a2 -; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV64-NEXT: vdiv.vv v9, v9, v10 ; LMULMAX1-RV64-NEXT: vdiv.vv v8, v8, v10 ; LMULMAX1-RV64-NEXT: vse32.v v8, (a0) @@ -5001,33 +5001,33 @@ define void @mulhs_v4i64(<4 x i64>* %x) { ; LMULMAX2-RV32-LABEL: mulhs_v4i64: ; LMULMAX2-RV32: # %bb.0: -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX2-RV32-NEXT: li a1, 17 ; LMULMAX2-RV32-NEXT: vmv.s.x v0, a1 ; LMULMAX2-RV32-NEXT: lui a1, 349525 ; LMULMAX2-RV32-NEXT: addi a2, a1, 1365 -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.x v10, a2 ; LMULMAX2-RV32-NEXT: addi a1, a1, 1366 ; LMULMAX2-RV32-NEXT: vmerge.vxm v10, v10, a1, v0 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmulh.vv v10, v8, v10 ; LMULMAX2-RV32-NEXT: li a1, 51 ; LMULMAX2-RV32-NEXT: vmv.s.x v0, a1 -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.i v12, -1 ; LMULMAX2-RV32-NEXT: vmerge.vim v12, v12, 0, v0 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmadd.vv v12, v8, v10 ; LMULMAX2-RV32-NEXT: li a1, 63 ; LMULMAX2-RV32-NEXT: vsrl.vx v8, v12, a1 ; LMULMAX2-RV32-NEXT: li a1, 68 ; LMULMAX2-RV32-NEXT: vmv.s.x v0, a1 -; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-RV32-NEXT: vmv.v.i v10, 0 ; LMULMAX2-RV32-NEXT: vmerge.vim v10, v10, 1, v0 -; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV32-NEXT: vsra.vv v10, v12, v10 ; LMULMAX2-RV32-NEXT: vadd.vv v8, v10, v8 ; LMULMAX2-RV32-NEXT: vse64.v v8, (a0) @@ -5035,7 +5035,7 @@ ; ; LMULMAX2-RV64-LABEL: mulhs_v4i64: ; LMULMAX2-RV64: # %bb.0: -; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-RV64-NEXT: li a1, 5 ; LMULMAX2-RV64-NEXT: vmv.s.x v0, a1 ; LMULMAX2-RV64-NEXT: lui a1, %hi(.LCPI160_0) @@ -5060,15 +5060,15 @@ ; ; LMULMAX1-RV32-LABEL: mulhs_v4i64: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a1, a0, 16 ; LMULMAX1-RV32-NEXT: vle64.v v9, (a1) ; LMULMAX1-RV32-NEXT: lui a2, %hi(.LCPI160_0) ; LMULMAX1-RV32-NEXT: addi a2, a2, %lo(.LCPI160_0) -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle32.v v10, (a2) -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vdiv.vv v9, v9, v10 ; LMULMAX1-RV32-NEXT: vdiv.vv v8, v8, v10 ; LMULMAX1-RV32-NEXT: vse64.v v8, (a0) @@ -5077,7 +5077,7 @@ ; ; LMULMAX1-RV64-LABEL: mulhs_v4i64: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; 
LMULMAX1-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV64-NEXT: lui a1, %hi(.LCPI160_0) ; LMULMAX1-RV64-NEXT: addi a1, a1, %lo(.LCPI160_0) @@ -5086,9 +5086,9 @@ ; LMULMAX1-RV64-NEXT: ld a1, %lo(.LCPI160_1)(a1) ; LMULMAX1-RV64-NEXT: addi a2, a0, 16 ; LMULMAX1-RV64-NEXT: vle64.v v10, (a2) -; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, tu, ma ; LMULMAX1-RV64-NEXT: vmv.s.x v9, a1 -; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: vmulh.vv v11, v10, v9 ; LMULMAX1-RV64-NEXT: vid.v v12 ; LMULMAX1-RV64-NEXT: vrsub.vi v13, v12, 0 @@ -5115,7 +5115,7 @@ ; LMULMAX2-LABEL: smin_v32i8: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: li a2, 32 -; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vle8.v v10, (a1) ; LMULMAX2-NEXT: vmin.vv v8, v8, v10 @@ -5124,7 +5124,7 @@ ; ; LMULMAX1-RV32-LABEL: smin_v32i8: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle8.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle8.v v9, (a2) @@ -5139,7 +5139,7 @@ ; ; LMULMAX1-RV64-LABEL: smin_v32i8: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle8.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle8.v v9, (a2) @@ -5162,7 +5162,7 @@ define void @smin_v16i16(<16 x i16>* %x, <16 x i16>* %y) { ; LMULMAX2-LABEL: smin_v16i16: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-NEXT: vle16.v v8, (a0) ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vmin.vv v8, v8, v10 @@ -5171,7 +5171,7 @@ ; ; LMULMAX1-RV32-LABEL: smin_v16i16: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle16.v v9, (a2) @@ -5186,7 +5186,7 @@ ; ; LMULMAX1-RV64-LABEL: smin_v16i16: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle16.v v9, (a2) @@ -5209,7 +5209,7 @@ define void @smin_v8i32(<8 x i32>* %x, <8 x i32>* %y) { ; LMULMAX2-LABEL: smin_v8i32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v8, (a0) ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vmin.vv v8, v8, v10 @@ -5218,7 +5218,7 @@ ; ; LMULMAX1-RV32-LABEL: smin_v8i32: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle32.v v9, (a2) @@ -5233,7 +5233,7 @@ ; ; LMULMAX1-RV64-LABEL: smin_v8i32: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle32.v v9, (a2) 
@@ -5256,7 +5256,7 @@ define void @smin_v4i64(<4 x i64>* %x, <4 x i64>* %y) { ; LMULMAX2-LABEL: smin_v4i64: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-NEXT: vle64.v v8, (a0) ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vmin.vv v8, v8, v10 @@ -5265,7 +5265,7 @@ ; ; LMULMAX1-RV32-LABEL: smin_v4i64: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle64.v v9, (a2) @@ -5280,7 +5280,7 @@ ; ; LMULMAX1-RV64-LABEL: smin_v4i64: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle64.v v9, (a2) @@ -5304,7 +5304,7 @@ ; LMULMAX2-LABEL: smax_v32i8: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: li a2, 32 -; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vle8.v v10, (a1) ; LMULMAX2-NEXT: vmax.vv v8, v8, v10 @@ -5313,7 +5313,7 @@ ; ; LMULMAX1-RV32-LABEL: smax_v32i8: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle8.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle8.v v9, (a2) @@ -5328,7 +5328,7 @@ ; ; LMULMAX1-RV64-LABEL: smax_v32i8: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle8.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle8.v v9, (a2) @@ -5351,7 +5351,7 @@ define void @smax_v16i16(<16 x i16>* %x, <16 x i16>* %y) { ; LMULMAX2-LABEL: smax_v16i16: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-NEXT: vle16.v v8, (a0) ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vmax.vv v8, v8, v10 @@ -5360,7 +5360,7 @@ ; ; LMULMAX1-RV32-LABEL: smax_v16i16: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle16.v v9, (a2) @@ -5375,7 +5375,7 @@ ; ; LMULMAX1-RV64-LABEL: smax_v16i16: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle16.v v9, (a2) @@ -5398,7 +5398,7 @@ define void @smax_v8i32(<8 x i32>* %x, <8 x i32>* %y) { ; LMULMAX2-LABEL: smax_v8i32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v8, (a0) ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vmax.vv v8, v8, v10 @@ -5407,7 +5407,7 @@ ; ; LMULMAX1-RV32-LABEL: smax_v8i32: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle32.v v9, (a2) @@ -5422,7 +5422,7 @@ 
; ; LMULMAX1-RV64-LABEL: smax_v8i32: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle32.v v9, (a2) @@ -5445,7 +5445,7 @@ define void @smax_v4i64(<4 x i64>* %x, <4 x i64>* %y) { ; LMULMAX2-LABEL: smax_v4i64: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-NEXT: vle64.v v8, (a0) ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vmax.vv v8, v8, v10 @@ -5454,7 +5454,7 @@ ; ; LMULMAX1-RV32-LABEL: smax_v4i64: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle64.v v9, (a2) @@ -5469,7 +5469,7 @@ ; ; LMULMAX1-RV64-LABEL: smax_v4i64: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle64.v v9, (a2) @@ -5493,7 +5493,7 @@ ; LMULMAX2-LABEL: umin_v32i8: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: li a2, 32 -; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vle8.v v10, (a1) ; LMULMAX2-NEXT: vminu.vv v8, v8, v10 @@ -5502,7 +5502,7 @@ ; ; LMULMAX1-RV32-LABEL: umin_v32i8: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle8.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle8.v v9, (a2) @@ -5517,7 +5517,7 @@ ; ; LMULMAX1-RV64-LABEL: umin_v32i8: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle8.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle8.v v9, (a2) @@ -5540,7 +5540,7 @@ define void @umin_v16i16(<16 x i16>* %x, <16 x i16>* %y) { ; LMULMAX2-LABEL: umin_v16i16: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-NEXT: vle16.v v8, (a0) ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vminu.vv v8, v8, v10 @@ -5549,7 +5549,7 @@ ; ; LMULMAX1-RV32-LABEL: umin_v16i16: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle16.v v9, (a2) @@ -5564,7 +5564,7 @@ ; ; LMULMAX1-RV64-LABEL: umin_v16i16: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle16.v v9, (a2) @@ -5587,7 +5587,7 @@ define void @umin_v8i32(<8 x i32>* %x, <8 x i32>* %y) { ; LMULMAX2-LABEL: umin_v8i32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v8, (a0) ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vminu.vv v8, v8, v10 @@ -5596,7 +5596,7 @@ ; ; 
LMULMAX1-RV32-LABEL: umin_v8i32: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle32.v v9, (a2) @@ -5611,7 +5611,7 @@ ; ; LMULMAX1-RV64-LABEL: umin_v8i32: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle32.v v9, (a2) @@ -5634,7 +5634,7 @@ define void @umin_v4i64(<4 x i64>* %x, <4 x i64>* %y) { ; LMULMAX2-LABEL: umin_v4i64: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-NEXT: vle64.v v8, (a0) ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vminu.vv v8, v8, v10 @@ -5643,7 +5643,7 @@ ; ; LMULMAX1-RV32-LABEL: umin_v4i64: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle64.v v9, (a2) @@ -5658,7 +5658,7 @@ ; ; LMULMAX1-RV64-LABEL: umin_v4i64: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle64.v v9, (a2) @@ -5682,7 +5682,7 @@ ; LMULMAX2-LABEL: umax_v32i8: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: li a2, 32 -; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; LMULMAX2-NEXT: vle8.v v8, (a0) ; LMULMAX2-NEXT: vle8.v v10, (a1) ; LMULMAX2-NEXT: vmaxu.vv v8, v8, v10 @@ -5691,7 +5691,7 @@ ; ; LMULMAX1-RV32-LABEL: umax_v32i8: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle8.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle8.v v9, (a2) @@ -5706,7 +5706,7 @@ ; ; LMULMAX1-RV64-LABEL: umax_v32i8: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle8.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle8.v v9, (a2) @@ -5729,7 +5729,7 @@ define void @umax_v16i16(<16 x i16>* %x, <16 x i16>* %y) { ; LMULMAX2-LABEL: umax_v16i16: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-NEXT: vle16.v v8, (a0) ; LMULMAX2-NEXT: vle16.v v10, (a1) ; LMULMAX2-NEXT: vmaxu.vv v8, v8, v10 @@ -5738,7 +5738,7 @@ ; ; LMULMAX1-RV32-LABEL: umax_v16i16: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle16.v v9, (a2) @@ -5753,7 +5753,7 @@ ; ; LMULMAX1-RV64-LABEL: umax_v16i16: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle16.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle16.v v9, (a2) @@ -5776,7 +5776,7 @@ define void @umax_v8i32(<8 x i32>* %x, <8 x 
i32>* %y) { ; LMULMAX2-LABEL: umax_v8i32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vle32.v v8, (a0) ; LMULMAX2-NEXT: vle32.v v10, (a1) ; LMULMAX2-NEXT: vmaxu.vv v8, v8, v10 @@ -5785,7 +5785,7 @@ ; ; LMULMAX1-RV32-LABEL: umax_v8i32: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle32.v v9, (a2) @@ -5800,7 +5800,7 @@ ; ; LMULMAX1-RV64-LABEL: umax_v8i32: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle32.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle32.v v9, (a2) @@ -5823,7 +5823,7 @@ define void @umax_v4i64(<4 x i64>* %x, <4 x i64>* %y) { ; LMULMAX2-LABEL: umax_v4i64: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-NEXT: vle64.v v8, (a0) ; LMULMAX2-NEXT: vle64.v v10, (a1) ; LMULMAX2-NEXT: vmaxu.vv v8, v8, v10 @@ -5832,7 +5832,7 @@ ; ; LMULMAX1-RV32-LABEL: umax_v4i64: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV32-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a2, a0, 16 ; LMULMAX1-RV32-NEXT: vle64.v v9, (a2) @@ -5847,7 +5847,7 @@ ; ; LMULMAX1-RV64-LABEL: umax_v4i64: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-RV64-NEXT: vle64.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a2, a1, 16 ; LMULMAX1-RV64-NEXT: vle64.v v9, (a2) @@ -5870,7 +5870,7 @@ define void @add_vi_v16i8(<16 x i8>* %x) { ; CHECK-LABEL: add_vi_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: vse8.v v8, (a0) @@ -5886,7 +5886,7 @@ define void @add_vi_v8i16(<8 x i16>* %x) { ; CHECK-LABEL: add_vi_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: vse16.v v8, (a0) @@ -5902,7 +5902,7 @@ define void @add_vi_v4i32(<4 x i32>* %x) { ; CHECK-LABEL: add_vi_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -5918,7 +5918,7 @@ define void @add_vi_v2i64(<2 x i64>* %x) { ; CHECK-LABEL: add_vi_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: vse64.v v8, (a0) @@ -5934,7 +5934,7 @@ define void @add_iv_v16i8(<16 x i8>* %x) { ; CHECK-LABEL: add_iv_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vadd.vi v8, v8, 1 ; CHECK-NEXT: vse8.v v8, (a0) @@ -5950,7 +5950,7 @@ define void @add_iv_v8i16(<8 x i16>* %x) { ; CHECK-LABEL: add_iv_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu 
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vadd.vi v8, v8, 1 ; CHECK-NEXT: vse16.v v8, (a0) @@ -5966,7 +5966,7 @@ define void @add_iv_v4i32(<4 x i32>* %x) { ; CHECK-LABEL: add_iv_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vadd.vi v8, v8, 1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -5982,7 +5982,7 @@ define void @add_iv_v2i64(<2 x i64>* %x) { ; CHECK-LABEL: add_iv_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vadd.vi v8, v8, 1 ; CHECK-NEXT: vse64.v v8, (a0) @@ -5998,7 +5998,7 @@ define void @add_vx_v16i8(<16 x i8>* %x, i8 %y) { ; CHECK-LABEL: add_vx_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vadd.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) @@ -6014,7 +6014,7 @@ define void @add_vx_v8i16(<8 x i16>* %x, i16 %y) { ; CHECK-LABEL: add_vx_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vadd.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) @@ -6030,7 +6030,7 @@ define void @add_vx_v4i32(<4 x i32>* %x, i32 %y) { ; CHECK-LABEL: add_vx_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vadd.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -6046,7 +6046,7 @@ define void @add_xv_v16i8(<16 x i8>* %x, i8 %y) { ; CHECK-LABEL: add_xv_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vadd.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) @@ -6062,7 +6062,7 @@ define void @add_xv_v8i16(<8 x i16>* %x, i16 %y) { ; CHECK-LABEL: add_xv_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vadd.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) @@ -6078,7 +6078,7 @@ define void @add_xv_v4i32(<4 x i32>* %x, i32 %y) { ; CHECK-LABEL: add_xv_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vadd.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -6094,7 +6094,7 @@ define void @sub_vi_v16i8(<16 x i8>* %x) { ; CHECK-LABEL: sub_vi_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a1, -1 ; CHECK-NEXT: vsub.vx v8, v8, a1 @@ -6111,7 +6111,7 @@ define void @sub_vi_v8i16(<8 x i16>* %x) { ; CHECK-LABEL: sub_vi_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: li a1, -1 ; CHECK-NEXT: vsub.vx v8, v8, a1 @@ -6128,7 +6128,7 @@ define void @sub_vi_v4i32(<4 x i32>* %x) { ; CHECK-LABEL: sub_vi_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: li a1, -1 ; CHECK-NEXT: vsub.vx v8, v8, a1 @@ -6145,7 +6145,7 @@ define void 
@sub_vi_v2i64(<2 x i64>* %x) { ; CHECK-LABEL: sub_vi_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: li a1, -1 ; CHECK-NEXT: vsub.vx v8, v8, a1 @@ -6162,7 +6162,7 @@ define void @sub_iv_v16i8(<16 x i8>* %x) { ; CHECK-LABEL: sub_iv_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vrsub.vi v8, v8, 1 ; CHECK-NEXT: vse8.v v8, (a0) @@ -6178,7 +6178,7 @@ define void @sub_iv_v8i16(<8 x i16>* %x) { ; CHECK-LABEL: sub_iv_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vrsub.vi v8, v8, 1 ; CHECK-NEXT: vse16.v v8, (a0) @@ -6194,7 +6194,7 @@ define void @sub_iv_v4i32(<4 x i32>* %x) { ; CHECK-LABEL: sub_iv_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vrsub.vi v8, v8, 1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -6210,7 +6210,7 @@ define void @sub_iv_v2i64(<2 x i64>* %x) { ; CHECK-LABEL: sub_iv_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vrsub.vi v8, v8, 1 ; CHECK-NEXT: vse64.v v8, (a0) @@ -6226,7 +6226,7 @@ define void @sub_vx_v16i8(<16 x i8>* %x, i8 %y) { ; CHECK-LABEL: sub_vx_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsub.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) @@ -6242,7 +6242,7 @@ define void @sub_vx_v8i16(<8 x i16>* %x, i16 %y) { ; CHECK-LABEL: sub_vx_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsub.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) @@ -6258,7 +6258,7 @@ define void @sub_vx_v4i32(<4 x i32>* %x, i32 %y) { ; CHECK-LABEL: sub_vx_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsub.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -6274,7 +6274,7 @@ define void @sub_xv_v16i8(<16 x i8>* %x, i8 %y) { ; CHECK-LABEL: sub_xv_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vrsub.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) @@ -6290,7 +6290,7 @@ define void @sub_xv_v8i16(<8 x i16>* %x, i16 %y) { ; CHECK-LABEL: sub_xv_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vrsub.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) @@ -6306,7 +6306,7 @@ define void @sub_xv_v4i32(<4 x i32>* %x, i32 %y) { ; CHECK-LABEL: sub_xv_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vrsub.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -6322,7 +6322,7 @@ define void @mul_vx_v16i8(<16 x i8>* %x, i8 %y) { ; CHECK-LABEL: mul_vx_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 
16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmul.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) @@ -6338,7 +6338,7 @@ define void @mul_vx_v8i16(<8 x i16>* %x, i16 %y) { ; CHECK-LABEL: mul_vx_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmul.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) @@ -6354,7 +6354,7 @@ define void @mul_vx_v4i32(<4 x i32>* %x, i32 %y) { ; CHECK-LABEL: mul_vx_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmul.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -6370,7 +6370,7 @@ define void @mul_xv_v16i8(<16 x i8>* %x, i8 %y) { ; CHECK-LABEL: mul_xv_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmul.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) @@ -6386,7 +6386,7 @@ define void @mul_xv_v8i16(<8 x i16>* %x, i16 %y) { ; CHECK-LABEL: mul_xv_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmul.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) @@ -6402,7 +6402,7 @@ define void @mul_xv_v4i32(<4 x i32>* %x, i32 %y) { ; CHECK-LABEL: mul_xv_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmul.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -6418,7 +6418,7 @@ define void @and_vi_v16i8(<16 x i8>* %x) { ; CHECK-LABEL: and_vi_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vand.vi v8, v8, -2 ; CHECK-NEXT: vse8.v v8, (a0) @@ -6434,7 +6434,7 @@ define void @and_vi_v8i16(<8 x i16>* %x) { ; CHECK-LABEL: and_vi_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vand.vi v8, v8, -2 ; CHECK-NEXT: vse16.v v8, (a0) @@ -6450,7 +6450,7 @@ define void @and_vi_v4i32(<4 x i32>* %x) { ; CHECK-LABEL: and_vi_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vand.vi v8, v8, -2 ; CHECK-NEXT: vse32.v v8, (a0) @@ -6466,7 +6466,7 @@ define void @and_vi_v2i64(<2 x i64>* %x) { ; CHECK-LABEL: and_vi_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vand.vi v8, v8, -2 ; CHECK-NEXT: vse64.v v8, (a0) @@ -6482,7 +6482,7 @@ define void @and_iv_v16i8(<16 x i8>* %x) { ; CHECK-LABEL: and_iv_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vse8.v v8, (a0) @@ -6498,7 +6498,7 @@ define void @and_iv_v8i16(<8 x i16>* %x) { ; CHECK-LABEL: and_iv_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vse16.v v8, (a0) @@ -6514,7 +6514,7 @@ define void @and_iv_v4i32(<4 x 
i32>* %x) { ; CHECK-LABEL: and_iv_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -6530,7 +6530,7 @@ define void @and_iv_v2i64(<2 x i64>* %x) { ; CHECK-LABEL: and_iv_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vse64.v v8, (a0) @@ -6546,7 +6546,7 @@ define void @and_vx_v16i8(<16 x i8>* %x, i8 %y) { ; CHECK-LABEL: and_vx_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vand.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) @@ -6562,7 +6562,7 @@ define void @and_vx_v8i16(<8 x i16>* %x, i16 %y) { ; CHECK-LABEL: and_vx_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vand.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) @@ -6578,7 +6578,7 @@ define void @and_vx_v4i32(<4 x i32>* %x, i32 %y) { ; CHECK-LABEL: and_vx_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vand.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -6594,7 +6594,7 @@ define void @and_xv_v16i8(<16 x i8>* %x, i8 %y) { ; CHECK-LABEL: and_xv_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vand.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) @@ -6610,7 +6610,7 @@ define void @and_xv_v8i16(<8 x i16>* %x, i16 %y) { ; CHECK-LABEL: and_xv_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vand.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) @@ -6626,7 +6626,7 @@ define void @and_xv_v4i32(<4 x i32>* %x, i32 %y) { ; CHECK-LABEL: and_xv_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vand.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -6642,7 +6642,7 @@ define void @or_vi_v16i8(<16 x i8>* %x) { ; CHECK-LABEL: or_vi_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vor.vi v8, v8, -2 ; CHECK-NEXT: vse8.v v8, (a0) @@ -6658,7 +6658,7 @@ define void @or_vi_v8i16(<8 x i16>* %x) { ; CHECK-LABEL: or_vi_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vor.vi v8, v8, -2 ; CHECK-NEXT: vse16.v v8, (a0) @@ -6674,7 +6674,7 @@ define void @or_vi_v4i32(<4 x i32>* %x) { ; CHECK-LABEL: or_vi_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vor.vi v8, v8, -2 ; CHECK-NEXT: vse32.v v8, (a0) @@ -6690,7 +6690,7 @@ define void @or_vi_v2i64(<2 x i64>* %x) { ; CHECK-LABEL: or_vi_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: 
vle64.v v8, (a0) ; CHECK-NEXT: vor.vi v8, v8, -2 ; CHECK-NEXT: vse64.v v8, (a0) @@ -6706,7 +6706,7 @@ define void @or_iv_v16i8(<16 x i8>* %x) { ; CHECK-LABEL: or_iv_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vor.vi v8, v8, 1 ; CHECK-NEXT: vse8.v v8, (a0) @@ -6722,7 +6722,7 @@ define void @or_iv_v8i16(<8 x i16>* %x) { ; CHECK-LABEL: or_iv_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vor.vi v8, v8, 1 ; CHECK-NEXT: vse16.v v8, (a0) @@ -6738,7 +6738,7 @@ define void @or_iv_v4i32(<4 x i32>* %x) { ; CHECK-LABEL: or_iv_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vor.vi v8, v8, 1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -6754,7 +6754,7 @@ define void @or_iv_v2i64(<2 x i64>* %x) { ; CHECK-LABEL: or_iv_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vor.vi v8, v8, 1 ; CHECK-NEXT: vse64.v v8, (a0) @@ -6770,7 +6770,7 @@ define void @or_vx_v16i8(<16 x i8>* %x, i8 %y) { ; CHECK-LABEL: or_vx_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vor.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) @@ -6786,7 +6786,7 @@ define void @or_vx_v8i16(<8 x i16>* %x, i16 %y) { ; CHECK-LABEL: or_vx_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vor.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) @@ -6802,7 +6802,7 @@ define void @or_vx_v4i32(<4 x i32>* %x, i32 %y) { ; CHECK-LABEL: or_vx_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vor.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -6818,7 +6818,7 @@ define void @or_xv_v16i8(<16 x i8>* %x, i8 %y) { ; CHECK-LABEL: or_xv_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vor.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) @@ -6834,7 +6834,7 @@ define void @or_xv_v8i16(<8 x i16>* %x, i16 %y) { ; CHECK-LABEL: or_xv_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vor.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) @@ -6850,7 +6850,7 @@ define void @or_xv_v4i32(<4 x i32>* %x, i32 %y) { ; CHECK-LABEL: or_xv_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vor.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -6866,7 +6866,7 @@ define void @xor_vi_v16i8(<16 x i8>* %x) { ; CHECK-LABEL: xor_vi_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: vse8.v v8, (a0) @@ -6882,7 +6882,7 @@ define void @xor_vi_v8i16(<8 x i16>* %x) { ; CHECK-LABEL: xor_vi_v8i16: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: vse16.v v8, (a0) @@ -6898,7 +6898,7 @@ define void @xor_vi_v4i32(<4 x i32>* %x) { ; CHECK-LABEL: xor_vi_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: vse32.v v8, (a0) @@ -6914,7 +6914,7 @@ define void @xor_vi_v2i64(<2 x i64>* %x) { ; CHECK-LABEL: xor_vi_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: vse64.v v8, (a0) @@ -6930,7 +6930,7 @@ define void @xor_iv_v16i8(<16 x i8>* %x) { ; CHECK-LABEL: xor_iv_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vxor.vi v8, v8, 1 ; CHECK-NEXT: vse8.v v8, (a0) @@ -6946,7 +6946,7 @@ define void @xor_iv_v8i16(<8 x i16>* %x) { ; CHECK-LABEL: xor_iv_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vxor.vi v8, v8, 1 ; CHECK-NEXT: vse16.v v8, (a0) @@ -6962,7 +6962,7 @@ define void @xor_iv_v4i32(<4 x i32>* %x) { ; CHECK-LABEL: xor_iv_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vxor.vi v8, v8, 1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -6978,7 +6978,7 @@ define void @xor_iv_v2i64(<2 x i64>* %x) { ; CHECK-LABEL: xor_iv_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vxor.vi v8, v8, 1 ; CHECK-NEXT: vse64.v v8, (a0) @@ -6994,7 +6994,7 @@ define void @xor_vx_v16i8(<16 x i8>* %x, i8 %y) { ; CHECK-LABEL: xor_vx_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vxor.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) @@ -7010,7 +7010,7 @@ define void @xor_vx_v8i16(<8 x i16>* %x, i16 %y) { ; CHECK-LABEL: xor_vx_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vxor.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) @@ -7026,7 +7026,7 @@ define void @xor_vx_v4i32(<4 x i32>* %x, i32 %y) { ; CHECK-LABEL: xor_vx_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vxor.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -7042,7 +7042,7 @@ define void @xor_xv_v16i8(<16 x i8>* %x, i8 %y) { ; CHECK-LABEL: xor_xv_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vxor.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) @@ -7058,7 +7058,7 @@ define void @xor_xv_v8i16(<8 x i16>* %x, i16 %y) { ; CHECK-LABEL: xor_xv_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vxor.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, 
(a0) @@ -7074,7 +7074,7 @@ define void @xor_xv_v4i32(<4 x i32>* %x, i32 %y) { ; CHECK-LABEL: xor_xv_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vxor.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -7090,7 +7090,7 @@ define void @lshr_vi_v16i8(<16 x i8>* %x) { ; CHECK-LABEL: lshr_vi_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsrl.vi v8, v8, 7 ; CHECK-NEXT: vse8.v v8, (a0) @@ -7106,7 +7106,7 @@ define void @lshr_vi_v8i16(<8 x i16>* %x) { ; CHECK-LABEL: lshr_vi_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsrl.vi v8, v8, 15 ; CHECK-NEXT: vse16.v v8, (a0) @@ -7122,7 +7122,7 @@ define void @lshr_vi_v4i32(<4 x i32>* %x) { ; CHECK-LABEL: lshr_vi_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsrl.vi v8, v8, 31 ; CHECK-NEXT: vse32.v v8, (a0) @@ -7138,7 +7138,7 @@ define void @lshr_vi_v2i64(<2 x i64>* %x) { ; CHECK-LABEL: lshr_vi_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vsrl.vi v8, v8, 31 ; CHECK-NEXT: vse64.v v8, (a0) @@ -7154,7 +7154,7 @@ define void @lshr_vx_v16i8(<16 x i8>* %x, i8 %y) { ; CHECK-LABEL: lshr_vx_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) @@ -7170,7 +7170,7 @@ define void @lshr_vx_v8i16(<8 x i16>* %x, i16 %y) { ; CHECK-LABEL: lshr_vx_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) @@ -7186,7 +7186,7 @@ define void @lshr_vx_v4i32(<4 x i32>* %x, i32 %y) { ; CHECK-LABEL: lshr_vx_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsrl.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -7202,7 +7202,7 @@ define void @ashr_vi_v16i8(<16 x i8>* %x) { ; CHECK-LABEL: ashr_vi_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsra.vi v8, v8, 7 ; CHECK-NEXT: vse8.v v8, (a0) @@ -7218,7 +7218,7 @@ define void @ashr_vi_v8i16(<8 x i16>* %x) { ; CHECK-LABEL: ashr_vi_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsra.vi v8, v8, 15 ; CHECK-NEXT: vse16.v v8, (a0) @@ -7234,7 +7234,7 @@ define void @ashr_vi_v4i32(<4 x i32>* %x) { ; CHECK-LABEL: ashr_vi_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsra.vi v8, v8, 31 ; CHECK-NEXT: vse32.v v8, (a0) @@ -7250,7 +7250,7 @@ define void @ashr_vi_v2i64(<2 x i64>* %x) { ; CHECK-LABEL: ashr_vi_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, 
m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vsra.vi v8, v8, 31 ; CHECK-NEXT: vse64.v v8, (a0) @@ -7266,7 +7266,7 @@ define void @ashr_vx_v16i8(<16 x i8>* %x, i8 %y) { ; CHECK-LABEL: ashr_vx_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsra.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) @@ -7282,7 +7282,7 @@ define void @ashr_vx_v8i16(<8 x i16>* %x, i16 %y) { ; CHECK-LABEL: ashr_vx_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsra.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) @@ -7298,7 +7298,7 @@ define void @ashr_vx_v4i32(<4 x i32>* %x, i32 %y) { ; CHECK-LABEL: ashr_vx_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsra.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -7314,7 +7314,7 @@ define void @shl_vi_v16i8(<16 x i8>* %x) { ; CHECK-LABEL: shl_vi_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsll.vi v8, v8, 7 ; CHECK-NEXT: vse8.v v8, (a0) @@ -7330,7 +7330,7 @@ define void @shl_vi_v8i16(<8 x i16>* %x) { ; CHECK-LABEL: shl_vi_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsll.vi v8, v8, 15 ; CHECK-NEXT: vse16.v v8, (a0) @@ -7346,7 +7346,7 @@ define void @shl_vi_v4i32(<4 x i32>* %x) { ; CHECK-LABEL: shl_vi_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsll.vi v8, v8, 31 ; CHECK-NEXT: vse32.v v8, (a0) @@ -7362,7 +7362,7 @@ define void @shl_vi_v2i64(<2 x i64>* %x) { ; CHECK-LABEL: shl_vi_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vsll.vi v8, v8, 31 ; CHECK-NEXT: vse64.v v8, (a0) @@ -7378,7 +7378,7 @@ define void @shl_vx_v16i8(<16 x i8>* %x, i8 %y) { ; CHECK-LABEL: shl_vx_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsll.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) @@ -7394,7 +7394,7 @@ define void @shl_vx_v8i16(<8 x i16>* %x, i16 %y) { ; CHECK-LABEL: shl_vx_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsll.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) @@ -7410,7 +7410,7 @@ define void @shl_vx_v4i32(<4 x i32>* %x, i32 %y) { ; CHECK-LABEL: shl_vx_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsll.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -7426,7 +7426,7 @@ define void @sdiv_vx_v16i8(<16 x i8>* %x, i8 %y) { ; CHECK-LABEL: sdiv_vx_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vdiv.vx v8, v8, a1 ; CHECK-NEXT: vse8.v 
v8, (a0) @@ -7442,7 +7442,7 @@ define void @sdiv_vx_v8i16(<8 x i16>* %x, i16 %y) { ; CHECK-LABEL: sdiv_vx_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vdiv.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) @@ -7458,7 +7458,7 @@ define void @sdiv_vx_v4i32(<4 x i32>* %x, i32 %y) { ; CHECK-LABEL: sdiv_vx_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vdiv.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -7474,7 +7474,7 @@ define void @srem_vx_v16i8(<16 x i8>* %x, i8 %y) { ; CHECK-LABEL: srem_vx_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vrem.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) @@ -7490,7 +7490,7 @@ define void @srem_vx_v8i16(<8 x i16>* %x, i16 %y) { ; CHECK-LABEL: srem_vx_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vrem.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) @@ -7506,7 +7506,7 @@ define void @srem_vx_v4i32(<4 x i32>* %x, i32 %y) { ; CHECK-LABEL: srem_vx_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vrem.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -7522,7 +7522,7 @@ define void @udiv_vx_v16i8(<16 x i8>* %x, i8 %y) { ; CHECK-LABEL: udiv_vx_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vdivu.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) @@ -7538,7 +7538,7 @@ define void @udiv_vx_v8i16(<8 x i16>* %x, i16 %y) { ; CHECK-LABEL: udiv_vx_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vdivu.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) @@ -7554,7 +7554,7 @@ define void @udiv_vx_v4i32(<4 x i32>* %x, i32 %y) { ; CHECK-LABEL: udiv_vx_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vdivu.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -7570,7 +7570,7 @@ define void @urem_vx_v16i8(<16 x i8>* %x, i8 %y) { ; CHECK-LABEL: urem_vx_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vremu.vx v8, v8, a1 ; CHECK-NEXT: vse8.v v8, (a0) @@ -7586,7 +7586,7 @@ define void @urem_vx_v8i16(<8 x i16>* %x, i16 %y) { ; CHECK-LABEL: urem_vx_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vremu.vx v8, v8, a1 ; CHECK-NEXT: vse16.v v8, (a0) @@ -7602,7 +7602,7 @@ define void @urem_vx_v4i32(<4 x i32>* %x, i32 %y) { ; CHECK-LABEL: urem_vx_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vremu.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -7618,7 +7618,7 @@ define void @mulhu_vx_v16i8(<16 x i8>* %x) { ; CHECK-LABEL: 
mulhu_vx_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a1, 57 ; CHECK-NEXT: vmulhu.vx v8, v8, a1 @@ -7634,7 +7634,7 @@ define void @mulhu_vx_v8i16(<8 x i16>* %x) { ; RV32-LABEL: mulhu_vx_v8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV32-NEXT: vle16.v v8, (a0) ; RV32-NEXT: lui a1, 2 ; RV32-NEXT: addi a1, a1, 1171 @@ -7648,7 +7648,7 @@ ; ; RV64-LABEL: mulhu_vx_v8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64-NEXT: vle16.v v8, (a0) ; RV64-NEXT: lui a1, 2 ; RV64-NEXT: addiw a1, a1, 1171 @@ -7668,7 +7668,7 @@ define void @mulhu_vx_v4i32(<4 x i32>* %x) { ; RV32-LABEL: mulhu_vx_v4i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vle32.v v8, (a0) ; RV32-NEXT: lui a1, 838861 ; RV32-NEXT: addi a1, a1, -819 @@ -7679,7 +7679,7 @@ ; ; RV64-LABEL: mulhu_vx_v4i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64-NEXT: vle32.v v8, (a0) ; RV64-NEXT: lui a1, 838861 ; RV64-NEXT: addiw a1, a1, -819 @@ -7698,7 +7698,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: lui a1, 699051 ; RV32-NEXT: addi a2, a1, -1366 @@ -7715,7 +7715,7 @@ ; ; RV64-LABEL: mulhu_vx_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: lui a1, %hi(.LCPI289_0) ; RV64-NEXT: ld a1, %lo(.LCPI289_0)(a1) @@ -7732,7 +7732,7 @@ define void @mulhs_vx_v16i8(<16 x i8>* %x) { ; CHECK-LABEL: mulhs_vx_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a1, -123 ; CHECK-NEXT: vmulhu.vx v8, v8, a1 @@ -7748,7 +7748,7 @@ define void @mulhs_vx_v8i16(<8 x i16>* %x) { ; RV32-LABEL: mulhs_vx_v8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV32-NEXT: vle16.v v8, (a0) ; RV32-NEXT: lui a1, 5 ; RV32-NEXT: addi a1, a1, -1755 @@ -7761,7 +7761,7 @@ ; ; RV64-LABEL: mulhs_vx_v8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64-NEXT: vle16.v v8, (a0) ; RV64-NEXT: lui a1, 5 ; RV64-NEXT: addiw a1, a1, -1755 @@ -7780,7 +7780,7 @@ define void @mulhs_vx_v4i32(<4 x i32>* %x) { ; RV32-LABEL: mulhs_vx_v4i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vle32.v v8, (a0) ; RV32-NEXT: lui a1, 629146 ; RV32-NEXT: addi a1, a1, -1639 @@ -7793,7 +7793,7 @@ ; ; RV64-LABEL: mulhs_vx_v4i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64-NEXT: vle32.v v8, (a0) ; RV64-NEXT: lui a1, 629146 ; RV64-NEXT: addiw a1, a1, -1639 @@ -7814,7 +7814,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: lui a1, 
349525 ; RV32-NEXT: addi a2, a1, 1365 @@ -7833,7 +7833,7 @@ ; ; RV64-LABEL: mulhs_vx_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: lui a1, %hi(.LCPI293_0) ; RV64-NEXT: ld a1, %lo(.LCPI293_0)(a1) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-marith-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-marith-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-marith-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-marith-vp.ll @@ -9,7 +9,7 @@ define <1 x i1> @and_v1i1(<1 x i1> %b, <1 x i1> %c, <1 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: and_v1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmand.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <1 x i1> @llvm.vp.and.v1i1(<1 x i1> %b, <1 x i1> %c, <1 x i1> %a, i32 %evl) @@ -21,7 +21,7 @@ define <2 x i1> @and_v2i1(<2 x i1> %b, <2 x i1> %c, <2 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: and_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmand.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <2 x i1> @llvm.vp.and.v2i1(<2 x i1> %b, <2 x i1> %c, <2 x i1> %a, i32 %evl) @@ -33,7 +33,7 @@ define <4 x i1> @and_v4i1(<4 x i1> %b, <4 x i1> %c, <4 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: and_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmand.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <4 x i1> @llvm.vp.and.v4i1(<4 x i1> %b, <4 x i1> %c, <4 x i1> %a, i32 %evl) @@ -45,7 +45,7 @@ define <8 x i1> @and_v8i1(<8 x i1> %b, <8 x i1> %c, <8 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: and_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmand.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <8 x i1> @llvm.vp.and.v8i1(<8 x i1> %b, <8 x i1> %c, <8 x i1> %a, i32 %evl) @@ -57,7 +57,7 @@ define <16 x i1> @and_v16i1(<16 x i1> %b, <16 x i1> %c, <16 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: and_v16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmand.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <16 x i1> @llvm.vp.and.v16i1(<16 x i1> %b, <16 x i1> %c, <16 x i1> %a, i32 %evl) @@ -69,7 +69,7 @@ define <1 x i1> @or_v1i1(<1 x i1> %b, <1 x i1> %c, <1 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: or_v1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <1 x i1> @llvm.vp.or.v1i1(<1 x i1> %b, <1 x i1> %c, <1 x i1> %a, i32 %evl) @@ -81,7 +81,7 @@ define <2 x i1> @or_v2i1(<2 x i1> %b, <2 x i1> %c, <2 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: or_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <2 x i1> @llvm.vp.or.v2i1(<2 x i1> %b, <2 x i1> %c, <2 x i1> %a, i32 %evl) @@ -93,7 +93,7 @@ define <4 x i1> @or_v4i1(<4 x i1> %b, <4 x i1> %c, <4 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: or_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <4 x i1> 
@llvm.vp.or.v4i1(<4 x i1> %b, <4 x i1> %c, <4 x i1> %a, i32 %evl) @@ -105,7 +105,7 @@ define <8 x i1> @or_v8i1(<8 x i1> %b, <8 x i1> %c, <8 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: or_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <8 x i1> @llvm.vp.or.v8i1(<8 x i1> %b, <8 x i1> %c, <8 x i1> %a, i32 %evl) @@ -117,7 +117,7 @@ define <16 x i1> @or_v16i1(<16 x i1> %b, <16 x i1> %c, <16 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: or_v16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <16 x i1> @llvm.vp.or.v16i1(<16 x i1> %b, <16 x i1> %c, <16 x i1> %a, i32 %evl) @@ -129,7 +129,7 @@ define <1 x i1> @xor_v1i1(<1 x i1> %b, <1 x i1> %c, <1 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_v1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <1 x i1> @llvm.vp.xor.v1i1(<1 x i1> %b, <1 x i1> %c, <1 x i1> %a, i32 %evl) @@ -141,7 +141,7 @@ define <2 x i1> @xor_v2i1(<2 x i1> %b, <2 x i1> %c, <2 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <2 x i1> @llvm.vp.xor.v2i1(<2 x i1> %b, <2 x i1> %c, <2 x i1> %a, i32 %evl) @@ -153,7 +153,7 @@ define <4 x i1> @xor_v4i1(<4 x i1> %b, <4 x i1> %c, <4 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <4 x i1> @llvm.vp.xor.v4i1(<4 x i1> %b, <4 x i1> %c, <4 x i1> %a, i32 %evl) @@ -165,7 +165,7 @@ define <8 x i1> @xor_v8i1(<8 x i1> %b, <8 x i1> %c, <8 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <8 x i1> @llvm.vp.xor.v8i1(<8 x i1> %b, <8 x i1> %c, <8 x i1> %a, i32 %evl) @@ -177,7 +177,7 @@ define <16 x i1> @xor_v16i1(<16 x i1> %b, <16 x i1> %c, <16 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_v16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <16 x i1> @llvm.vp.xor.v16i1(<16 x i1> %b, <16 x i1> %c, <16 x i1> %a, i32 %evl) @@ -189,7 +189,7 @@ define <vscale x 1 x i1> @xor_nxv1i1(<vscale x 1 x i1> %b, <vscale x 1 x i1> %c, <vscale x 1 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <vscale x 1 x i1> @llvm.vp.xor.nxv1i1(<vscale x 1 x i1> %b, <vscale x 1 x i1> %c, <vscale x 1 x i1> %a, i32 %evl) @@ -201,7 +201,7 @@ define <vscale x 2 x i1> @xor_nxv2i1(<vscale x 2 x i1> %b, <vscale x 2 x i1> %c, <vscale x 2 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <vscale x 2 x i1> @llvm.vp.xor.nxv2i1(<vscale x 2 x i1> %b, <vscale x 2 x i1> %c, <vscale x 2 x i1> %a, i32 %evl) @@ -213,7 +213,7 @@ define <vscale x 4 x i1> @xor_nxv4i1(<vscale x 4 x i1> %b, <vscale x 4 x i1> %c, <vscale x 4 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0,
e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <vscale x 4 x i1> @llvm.vp.xor.nxv4i1(<vscale x 4 x i1> %b, <vscale x 4 x i1> %c, <vscale x 4 x i1> %a, i32 %evl) @@ -225,7 +225,7 @@ define <vscale x 8 x i1> @xor_nxv8i1(<vscale x 8 x i1> %b, <vscale x 8 x i1> %c, <vscale x 8 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <vscale x 8 x i1> @llvm.vp.xor.nxv8i1(<vscale x 8 x i1> %b, <vscale x 8 x i1> %c, <vscale x 8 x i1> %a, i32 %evl) @@ -237,7 +237,7 @@ define <vscale x 16 x i1> @xor_nxv16i1(<vscale x 16 x i1> %b, <vscale x 16 x i1> %c, <vscale x 16 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <vscale x 16 x i1> @llvm.vp.xor.nxv16i1(<vscale x 16 x i1> %b, <vscale x 16 x i1> %c, <vscale x 16 x i1> %a, i32 %evl) @@ -249,7 +249,7 @@ define <vscale x 32 x i1> @xor_nxv32i1(<vscale x 32 x i1> %b, <vscale x 32 x i1> %c, <vscale x 32 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_nxv32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <vscale x 32 x i1> @llvm.vp.xor.nxv32i1(<vscale x 32 x i1> %b, <vscale x 32 x i1> %c, <vscale x 32 x i1> %a, i32 %evl) @@ -261,7 +261,7 @@ define <vscale x 64 x i1> @xor_nxv64i1(<vscale x 64 x i1> %b, <vscale x 64 x i1> %c, <vscale x 64 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_nxv64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <vscale x 64 x i1> @llvm.vp.xor.nxv64i1(<vscale x 64 x i1> %b, <vscale x 64 x i1> %c, <vscale x 64 x i1> %a, i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll @@ -14,7 +14,7 @@ ; CHECK-LABEL: buildvec_mask_nonconst_v1i1: ; CHECK: # %bb.0: ; CHECK-NEXT: andi a0, a0, 1 -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -22,7 +22,7 @@ ; ZVE32F-LABEL: buildvec_mask_nonconst_v1i1: ; ZVE32F: # %bb.0: ; ZVE32F-NEXT: andi a0, a0, 1 -; ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; ZVE32F-NEXT: vmv.v.x v8, a0 ; ZVE32F-NEXT: vmsne.vi v0, v8, 0 ; ZVE32F-NEXT: ret @@ -34,7 +34,7 @@ ; CHECK-LABEL: buildvec_mask_optsize_nonconst_v1i1: ; CHECK: # %bb.0: ; CHECK-NEXT: andi a0, a0, 1 -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -42,7 +42,7 @@ ; ZVE32F-LABEL: buildvec_mask_optsize_nonconst_v1i1: ; ZVE32F: # %bb.0: ; ZVE32F-NEXT: andi a0, a0, 1 -; ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; ZVE32F-NEXT: vmv.v.x v8, a0 ; ZVE32F-NEXT: vmsne.vi v0, v8, 0 ; ZVE32F-NEXT: ret @@ -53,22 +53,22 @@ define <2 x i1> @buildvec_mask_nonconst_v2i1(i1 %x, i1 %y) { ; CHECK-LABEL: buildvec_mask_nonconst_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v8, a1 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret ; ; ZVE32F-LABEL: buildvec_mask_nonconst_v2i1: ; ZVE32F: # %bb.0: -;
ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; ZVE32F-NEXT: vmv.v.x v8, a1 -; ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, tu, mu +; ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, tu, ma ; ZVE32F-NEXT: vmv.s.x v8, a0 -; ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; ZVE32F-NEXT: vand.vi v8, v8, 1 ; ZVE32F-NEXT: vmsne.vi v0, v8, 0 ; ZVE32F-NEXT: ret @@ -86,7 +86,7 @@ ; CHECK-NEXT: sb a1, 15(sp) ; CHECK-NEXT: sb a0, 14(sp) ; CHECK-NEXT: addi a0, sp, 14 -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -100,7 +100,7 @@ ; ZVE32F-NEXT: sb a1, 15(sp) ; ZVE32F-NEXT: sb a0, 14(sp) ; ZVE32F-NEXT: addi a0, sp, 14 -; ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; ZVE32F-NEXT: vle8.v v8, (a0) ; ZVE32F-NEXT: vand.vi v8, v8, 1 ; ZVE32F-NEXT: vmsne.vi v0, v8, 0 @@ -115,14 +115,14 @@ ; CHECK-LABEL: buildvec_mask_v1i1: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: ret ; ; ZVE32F-LABEL: buildvec_mask_v1i1: ; ZVE32F: # %bb.0: ; ZVE32F-NEXT: li a0, 2 -; ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; ZVE32F-NEXT: vmv.s.x v0, a0 ; ZVE32F-NEXT: ret ret <3 x i1> @@ -132,14 +132,14 @@ ; CHECK-LABEL: buildvec_mask_optsize_v1i1: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: ret ; ; ZVE32F-LABEL: buildvec_mask_optsize_v1i1: ; ZVE32F: # %bb.0: ; ZVE32F-NEXT: li a0, 2 -; ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; ZVE32F-NEXT: vmv.s.x v0, a0 ; ZVE32F-NEXT: ret ret <3 x i1> @@ -149,14 +149,14 @@ ; CHECK-LABEL: buildvec_mask_v4i1: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 6 -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: ret ; ; ZVE32F-LABEL: buildvec_mask_v4i1: ; ZVE32F: # %bb.0: ; ZVE32F-NEXT: li a0, 6 -; ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; ZVE32F-NEXT: vmv.s.x v0, a0 ; ZVE32F-NEXT: ret ret <4 x i1> @@ -166,9 +166,9 @@ ; CHECK-LABEL: buildvec_mask_nonconst_v4i1: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 3 -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.s.x v0, a2 -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v8, a1 ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: vand.vi v8, v8, 1 @@ -178,9 +178,9 @@ ; ZVE32F-LABEL: buildvec_mask_nonconst_v4i1: ; ZVE32F: # %bb.0: ; ZVE32F-NEXT: li a2, 3 -; ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; ZVE32F-NEXT: vmv.s.x v0, a2 -; ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; ZVE32F-NEXT: vmv.v.x v8, a1 ; ZVE32F-NEXT: vmerge.vxm v8, v8, a0, v0 ; ZVE32F-NEXT: vand.vi v8, v8, 1 @@ -204,7 +204,7 @@ ; CHECK-NEXT: sb a0, 13(sp) ; CHECK-NEXT: sb a0, 12(sp) ; CHECK-NEXT: addi a0, sp, 12 -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, 
mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -220,7 +220,7 @@ ; ZVE32F-NEXT: sb a0, 13(sp) ; ZVE32F-NEXT: sb a0, 12(sp) ; ZVE32F-NEXT: addi a0, sp, 12 -; ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; ZVE32F-NEXT: vle8.v v8, (a0) ; ZVE32F-NEXT: vand.vi v8, v8, 1 ; ZVE32F-NEXT: vmsne.vi v0, v8, 0 @@ -244,7 +244,7 @@ ; CHECK-NEXT: sb a0, 13(sp) ; CHECK-NEXT: sb zero, 12(sp) ; CHECK-NEXT: addi a0, sp, 12 -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -261,7 +261,7 @@ ; ZVE32F-NEXT: sb a0, 13(sp) ; ZVE32F-NEXT: sb zero, 12(sp) ; ZVE32F-NEXT: addi a0, sp, 12 -; ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; ZVE32F-NEXT: vle8.v v8, (a0) ; ZVE32F-NEXT: vand.vi v8, v8, 1 ; ZVE32F-NEXT: vmsne.vi v0, v8, 0 @@ -278,14 +278,14 @@ ; CHECK-LABEL: buildvec_mask_v8i1: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 182 -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: ret ; ; ZVE32F-LABEL: buildvec_mask_v8i1: ; ZVE32F: # %bb.0: ; ZVE32F-NEXT: li a0, 182 -; ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; ZVE32F-NEXT: vmv.s.x v0, a0 ; ZVE32F-NEXT: ret ret <8 x i1> @@ -295,9 +295,9 @@ ; CHECK-LABEL: buildvec_mask_nonconst_v8i1: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 19 -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.s.x v0, a2 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v8, a1 ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: vand.vi v8, v8, 1 @@ -307,9 +307,9 @@ ; ZVE32F-LABEL: buildvec_mask_nonconst_v8i1: ; ZVE32F: # %bb.0: ; ZVE32F-NEXT: li a2, 19 -; ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; ZVE32F-NEXT: vmv.s.x v0, a2 -; ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; ZVE32F-NEXT: vmv.v.x v8, a1 ; ZVE32F-NEXT: vmerge.vxm v8, v8, a0, v0 ; ZVE32F-NEXT: vand.vi v8, v8, 1 @@ -341,7 +341,7 @@ ; CHECK-NEXT: sb a0, 9(sp) ; CHECK-NEXT: sb a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -362,7 +362,7 @@ ; ZVE32F-NEXT: sb a0, 9(sp) ; ZVE32F-NEXT: sb a0, 8(sp) ; ZVE32F-NEXT: addi a0, sp, 8 -; ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; ZVE32F-NEXT: vle8.v v8, (a0) ; ZVE32F-NEXT: vand.vi v8, v8, 1 ; ZVE32F-NEXT: vmsne.vi v0, v8, 0 @@ -394,7 +394,7 @@ ; CHECK-NEXT: sb a0, 9(sp) ; CHECK-NEXT: sb a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -415,7 +415,7 @@ ; ZVE32F-NEXT: sb a0, 9(sp) ; ZVE32F-NEXT: sb a0, 8(sp) ; ZVE32F-NEXT: addi a0, sp, 8 -; ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; ZVE32F-NEXT: vle8.v 
v8, (a0) ; ZVE32F-NEXT: vand.vi v8, v8, 1 ; ZVE32F-NEXT: vmsne.vi v0, v8, 0 @@ -446,7 +446,7 @@ ; CHECK-NEXT: sb a0, 9(sp) ; CHECK-NEXT: sb a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -466,7 +466,7 @@ ; ZVE32F-NEXT: sb a0, 9(sp) ; ZVE32F-NEXT: sb a0, 8(sp) ; ZVE32F-NEXT: addi a0, sp, 8 -; ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; ZVE32F-NEXT: vle8.v v8, (a0) ; ZVE32F-NEXT: vand.vi v8, v8, 1 ; ZVE32F-NEXT: vmsne.vi v0, v8, 0 @@ -487,14 +487,14 @@ ; CHECK-LABEL: buildvec_mask_v10i1: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 949 -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: ret ; ; ZVE32F-LABEL: buildvec_mask_v10i1: ; ZVE32F: # %bb.0: ; ZVE32F-NEXT: li a0, 949 -; ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; ZVE32F-NEXT: vmv.s.x v0, a0 ; ZVE32F-NEXT: ret ret <10 x i1> @@ -505,7 +505,7 @@ ; CHECK-RV32: # %bb.0: ; CHECK-RV32-NEXT: lui a0, 11 ; CHECK-RV32-NEXT: addi a0, a0, 1718 -; CHECK-RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-RV32-NEXT: vmv.s.x v0, a0 ; CHECK-RV32-NEXT: ret ; @@ -513,7 +513,7 @@ ; CHECK-RV64: # %bb.0: ; CHECK-RV64-NEXT: lui a0, 11 ; CHECK-RV64-NEXT: addiw a0, a0, 1718 -; CHECK-RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-RV64-NEXT: vmv.s.x v0, a0 ; CHECK-RV64-NEXT: ret ret <16 x i1> @@ -523,14 +523,14 @@ ; CHECK-LABEL: buildvec_mask_v16i1_undefs: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1722 -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: ret ; ; ZVE32F-LABEL: buildvec_mask_v16i1_undefs: ; ZVE32F: # %bb.0: ; ZVE32F-NEXT: li a0, 1722 -; ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; ZVE32F-NEXT: vmv.s.x v0, a0 ; ZVE32F-NEXT: ret ret <16 x i1> @@ -540,7 +540,7 @@ ; RV32-LMULMAX1-LABEL: buildvec_mask_v32i1: ; RV32-LMULMAX1: # %bb.0: ; RV32-LMULMAX1-NEXT: li a0, 1776 -; RV32-LMULMAX1-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; RV32-LMULMAX1-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; RV32-LMULMAX1-NEXT: vmv.s.x v0, a0 ; RV32-LMULMAX1-NEXT: lui a0, 11 ; RV32-LMULMAX1-NEXT: addi a0, a0, 1718 @@ -550,7 +550,7 @@ ; RV64-LMULMAX1-LABEL: buildvec_mask_v32i1: ; RV64-LMULMAX1: # %bb.0: ; RV64-LMULMAX1-NEXT: li a0, 1776 -; RV64-LMULMAX1-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; RV64-LMULMAX1-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; RV64-LMULMAX1-NEXT: vmv.s.x v0, a0 ; RV64-LMULMAX1-NEXT: lui a0, 11 ; RV64-LMULMAX1-NEXT: addiw a0, a0, 1718 @@ -561,7 +561,7 @@ ; RV32-LMULMAX2: # %bb.0: ; RV32-LMULMAX2-NEXT: lui a0, 748384 ; RV32-LMULMAX2-NEXT: addi a0, a0, 1776 -; RV32-LMULMAX2-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV32-LMULMAX2-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV32-LMULMAX2-NEXT: vmv.s.x v0, a0 ; RV32-LMULMAX2-NEXT: ret ; @@ -569,7 +569,7 @@ ; RV64-LMULMAX2: # %bb.0: ; RV64-LMULMAX2-NEXT: lui a0, 748384 ; RV64-LMULMAX2-NEXT: addiw a0, a0, 1776 -; RV64-LMULMAX2-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV64-LMULMAX2-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV64-LMULMAX2-NEXT: vmv.s.x v0, a0 ; 
RV64-LMULMAX2-NEXT: ret ; @@ -577,7 +577,7 @@ ; RV32-LMULMAX4: # %bb.0: ; RV32-LMULMAX4-NEXT: lui a0, 748384 ; RV32-LMULMAX4-NEXT: addi a0, a0, 1776 -; RV32-LMULMAX4-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV32-LMULMAX4-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV32-LMULMAX4-NEXT: vmv.s.x v0, a0 ; RV32-LMULMAX4-NEXT: ret ; @@ -585,7 +585,7 @@ ; RV64-LMULMAX4: # %bb.0: ; RV64-LMULMAX4-NEXT: lui a0, 748384 ; RV64-LMULMAX4-NEXT: addiw a0, a0, 1776 -; RV64-LMULMAX4-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV64-LMULMAX4-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV64-LMULMAX4-NEXT: vmv.s.x v0, a0 ; RV64-LMULMAX4-NEXT: ret ; @@ -593,7 +593,7 @@ ; RV32-LMULMAX8: # %bb.0: ; RV32-LMULMAX8-NEXT: lui a0, 748384 ; RV32-LMULMAX8-NEXT: addi a0, a0, 1776 -; RV32-LMULMAX8-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV32-LMULMAX8-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV32-LMULMAX8-NEXT: vmv.s.x v0, a0 ; RV32-LMULMAX8-NEXT: ret ; @@ -601,7 +601,7 @@ ; RV64-LMULMAX8: # %bb.0: ; RV64-LMULMAX8-NEXT: lui a0, 748384 ; RV64-LMULMAX8-NEXT: addiw a0, a0, 1776 -; RV64-LMULMAX8-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV64-LMULMAX8-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV64-LMULMAX8-NEXT: vmv.s.x v0, a0 ; RV64-LMULMAX8-NEXT: ret ret <32 x i1> @@ -611,7 +611,7 @@ ; RV32-LMULMAX1-LABEL: buildvec_mask_v64i1: ; RV32-LMULMAX1: # %bb.0: ; RV32-LMULMAX1-NEXT: li a0, 1776 -; RV32-LMULMAX1-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; RV32-LMULMAX1-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; RV32-LMULMAX1-NEXT: vmv.s.x v0, a0 ; RV32-LMULMAX1-NEXT: lui a0, 4 ; RV32-LMULMAX1-NEXT: addi a0, a0, -1793 @@ -625,7 +625,7 @@ ; RV64-LMULMAX1-LABEL: buildvec_mask_v64i1: ; RV64-LMULMAX1: # %bb.0: ; RV64-LMULMAX1-NEXT: li a0, 1776 -; RV64-LMULMAX1-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; RV64-LMULMAX1-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; RV64-LMULMAX1-NEXT: vmv.s.x v0, a0 ; RV64-LMULMAX1-NEXT: lui a0, 4 ; RV64-LMULMAX1-NEXT: addiw a0, a0, -1793 @@ -640,7 +640,7 @@ ; RV32-LMULMAX2: # %bb.0: ; RV32-LMULMAX2-NEXT: lui a0, 748384 ; RV32-LMULMAX2-NEXT: addi a0, a0, 1776 -; RV32-LMULMAX2-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV32-LMULMAX2-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV32-LMULMAX2-NEXT: vmv.s.x v0, a0 ; RV32-LMULMAX2-NEXT: lui a0, 748388 ; RV32-LMULMAX2-NEXT: addi a0, a0, -1793 @@ -651,7 +651,7 @@ ; RV64-LMULMAX2: # %bb.0: ; RV64-LMULMAX2-NEXT: lui a0, 748384 ; RV64-LMULMAX2-NEXT: addiw a0, a0, 1776 -; RV64-LMULMAX2-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV64-LMULMAX2-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV64-LMULMAX2-NEXT: vmv.s.x v0, a0 ; RV64-LMULMAX2-NEXT: lui a0, 748388 ; RV64-LMULMAX2-NEXT: addiw a0, a0, -1793 @@ -662,12 +662,12 @@ ; RV32-LMULMAX4: # %bb.0: ; RV32-LMULMAX4-NEXT: lui a0, 748388 ; RV32-LMULMAX4-NEXT: addi a0, a0, -1793 -; RV32-LMULMAX4-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-LMULMAX4-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32-LMULMAX4-NEXT: vmv.s.x v8, a0 ; RV32-LMULMAX4-NEXT: lui a0, 748384 ; RV32-LMULMAX4-NEXT: addi a0, a0, 1776 ; RV32-LMULMAX4-NEXT: vmv.s.x v0, a0 -; RV32-LMULMAX4-NEXT: vsetvli zero, zero, e32, mf2, tu, mu +; RV32-LMULMAX4-NEXT: vsetvli zero, zero, e32, mf2, tu, ma ; RV32-LMULMAX4-NEXT: vslideup.vi v0, v8, 1 ; RV32-LMULMAX4-NEXT: ret ; @@ -675,7 +675,7 @@ ; RV64-LMULMAX4: # %bb.0: ; RV64-LMULMAX4-NEXT: lui a0, %hi(.LCPI19_0) ; RV64-LMULMAX4-NEXT: addi a0, a0, %lo(.LCPI19_0) -; RV64-LMULMAX4-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-LMULMAX4-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-LMULMAX4-NEXT: vlse64.v v0, (a0), zero ; 
RV64-LMULMAX4-NEXT: ret ; @@ -683,12 +683,12 @@ ; RV32-LMULMAX8: # %bb.0: ; RV32-LMULMAX8-NEXT: lui a0, 748388 ; RV32-LMULMAX8-NEXT: addi a0, a0, -1793 -; RV32-LMULMAX8-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-LMULMAX8-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32-LMULMAX8-NEXT: vmv.s.x v8, a0 ; RV32-LMULMAX8-NEXT: lui a0, 748384 ; RV32-LMULMAX8-NEXT: addi a0, a0, 1776 ; RV32-LMULMAX8-NEXT: vmv.s.x v0, a0 -; RV32-LMULMAX8-NEXT: vsetvli zero, zero, e32, mf2, tu, mu +; RV32-LMULMAX8-NEXT: vsetvli zero, zero, e32, mf2, tu, ma ; RV32-LMULMAX8-NEXT: vslideup.vi v0, v8, 1 ; RV32-LMULMAX8-NEXT: ret ; @@ -696,7 +696,7 @@ ; RV64-LMULMAX8: # %bb.0: ; RV64-LMULMAX8-NEXT: lui a0, %hi(.LCPI19_0) ; RV64-LMULMAX8-NEXT: addi a0, a0, %lo(.LCPI19_0) -; RV64-LMULMAX8-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-LMULMAX8-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-LMULMAX8-NEXT: vlse64.v v0, (a0), zero ; RV64-LMULMAX8-NEXT: ret ret <64 x i1> @@ -706,7 +706,7 @@ ; RV32-LMULMAX1-LABEL: buildvec_mask_v128i1: ; RV32-LMULMAX1: # %bb.0: ; RV32-LMULMAX1-NEXT: li a0, 1776 -; RV32-LMULMAX1-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; RV32-LMULMAX1-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; RV32-LMULMAX1-NEXT: vmv.s.x v0, a0 ; RV32-LMULMAX1-NEXT: lui a0, 11 ; RV32-LMULMAX1-NEXT: addi a0, a0, 1718 @@ -728,7 +728,7 @@ ; RV64-LMULMAX1-LABEL: buildvec_mask_v128i1: ; RV64-LMULMAX1: # %bb.0: ; RV64-LMULMAX1-NEXT: li a0, 1776 -; RV64-LMULMAX1-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; RV64-LMULMAX1-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; RV64-LMULMAX1-NEXT: vmv.s.x v0, a0 ; RV64-LMULMAX1-NEXT: lui a0, 11 ; RV64-LMULMAX1-NEXT: addiw a0, a0, 1718 @@ -751,7 +751,7 @@ ; RV32-LMULMAX2: # %bb.0: ; RV32-LMULMAX2-NEXT: lui a0, 748384 ; RV32-LMULMAX2-NEXT: addi a0, a0, 1776 -; RV32-LMULMAX2-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV32-LMULMAX2-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV32-LMULMAX2-NEXT: vmv.s.x v0, a0 ; RV32-LMULMAX2-NEXT: lui a0, 748388 ; RV32-LMULMAX2-NEXT: addi a0, a0, -1793 @@ -768,7 +768,7 @@ ; RV64-LMULMAX2: # %bb.0: ; RV64-LMULMAX2-NEXT: lui a0, 748384 ; RV64-LMULMAX2-NEXT: addiw a0, a0, 1776 -; RV64-LMULMAX2-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV64-LMULMAX2-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV64-LMULMAX2-NEXT: vmv.s.x v0, a0 ; RV64-LMULMAX2-NEXT: lui a0, 748388 ; RV64-LMULMAX2-NEXT: addiw a0, a0, -1793 @@ -785,12 +785,12 @@ ; RV32-LMULMAX4: # %bb.0: ; RV32-LMULMAX4-NEXT: lui a0, 748388 ; RV32-LMULMAX4-NEXT: addi a0, a0, -1793 -; RV32-LMULMAX4-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-LMULMAX4-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32-LMULMAX4-NEXT: vmv.s.x v8, a0 ; RV32-LMULMAX4-NEXT: lui a0, 748384 ; RV32-LMULMAX4-NEXT: addi a0, a0, 1776 ; RV32-LMULMAX4-NEXT: vmv.s.x v0, a0 -; RV32-LMULMAX4-NEXT: vsetvli zero, zero, e32, mf2, tu, mu +; RV32-LMULMAX4-NEXT: vsetvli zero, zero, e32, mf2, tu, ma ; RV32-LMULMAX4-NEXT: vslideup.vi v0, v8, 1 ; RV32-LMULMAX4-NEXT: lui a0, 945060 ; RV32-LMULMAX4-NEXT: addi a0, a0, -1793 @@ -805,7 +805,7 @@ ; RV64-LMULMAX4: # %bb.0: ; RV64-LMULMAX4-NEXT: lui a0, %hi(.LCPI20_0) ; RV64-LMULMAX4-NEXT: addi a0, a0, %lo(.LCPI20_0) -; RV64-LMULMAX4-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-LMULMAX4-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-LMULMAX4-NEXT: vlse64.v v0, (a0), zero ; RV64-LMULMAX4-NEXT: lui a0, %hi(.LCPI20_1) ; RV64-LMULMAX4-NEXT: addi a0, a0, %lo(.LCPI20_1) @@ -816,22 +816,22 @@ ; RV32-LMULMAX8: # %bb.0: ; RV32-LMULMAX8-NEXT: lui a0, 748388 ; RV32-LMULMAX8-NEXT: addi a0, a0, -1793 -; RV32-LMULMAX8-NEXT: vsetivli 
zero, 4, e32, m1, ta, mu +; RV32-LMULMAX8-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-LMULMAX8-NEXT: vmv.s.x v8, a0 ; RV32-LMULMAX8-NEXT: lui a0, 748384 ; RV32-LMULMAX8-NEXT: addi a0, a0, 1776 ; RV32-LMULMAX8-NEXT: vmv.s.x v0, a0 -; RV32-LMULMAX8-NEXT: vsetivli zero, 2, e32, m1, tu, mu +; RV32-LMULMAX8-NEXT: vsetivli zero, 2, e32, m1, tu, ma ; RV32-LMULMAX8-NEXT: vslideup.vi v0, v8, 1 ; RV32-LMULMAX8-NEXT: lui a0, 551776 ; RV32-LMULMAX8-NEXT: addi a0, a0, 1776 ; RV32-LMULMAX8-NEXT: vmv.s.x v8, a0 -; RV32-LMULMAX8-NEXT: vsetivli zero, 3, e32, m1, tu, mu +; RV32-LMULMAX8-NEXT: vsetivli zero, 3, e32, m1, tu, ma ; RV32-LMULMAX8-NEXT: vslideup.vi v0, v8, 2 ; RV32-LMULMAX8-NEXT: lui a0, 945060 ; RV32-LMULMAX8-NEXT: addi a0, a0, -1793 ; RV32-LMULMAX8-NEXT: vmv.s.x v8, a0 -; RV32-LMULMAX8-NEXT: vsetivli zero, 4, e32, m1, tu, mu +; RV32-LMULMAX8-NEXT: vsetivli zero, 4, e32, m1, tu, ma ; RV32-LMULMAX8-NEXT: vslideup.vi v0, v8, 3 ; RV32-LMULMAX8-NEXT: ret ; @@ -841,10 +841,10 @@ ; RV64-LMULMAX8-NEXT: ld a0, %lo(.LCPI20_0)(a0) ; RV64-LMULMAX8-NEXT: lui a1, %hi(.LCPI20_1) ; RV64-LMULMAX8-NEXT: ld a1, %lo(.LCPI20_1)(a1) -; RV64-LMULMAX8-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-LMULMAX8-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-LMULMAX8-NEXT: vmv.s.x v8, a0 ; RV64-LMULMAX8-NEXT: vmv.s.x v0, a1 -; RV64-LMULMAX8-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; RV64-LMULMAX8-NEXT: vsetvli zero, zero, e64, m1, tu, ma ; RV64-LMULMAX8-NEXT: vslideup.vi v0, v8, 1 ; RV64-LMULMAX8-NEXT: ret ret <128 x i1> @@ -854,7 +854,7 @@ ; RV32-LMULMAX1-LABEL: buildvec_mask_optsize_v128i1: ; RV32-LMULMAX1: # %bb.0: ; RV32-LMULMAX1-NEXT: li a0, 1776 -; RV32-LMULMAX1-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; RV32-LMULMAX1-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; RV32-LMULMAX1-NEXT: vmv.s.x v0, a0 ; RV32-LMULMAX1-NEXT: lui a0, 11 ; RV32-LMULMAX1-NEXT: addi a0, a0, 1718 @@ -876,7 +876,7 @@ ; RV64-LMULMAX1-LABEL: buildvec_mask_optsize_v128i1: ; RV64-LMULMAX1: # %bb.0: ; RV64-LMULMAX1-NEXT: li a0, 1776 -; RV64-LMULMAX1-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; RV64-LMULMAX1-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; RV64-LMULMAX1-NEXT: vmv.s.x v0, a0 ; RV64-LMULMAX1-NEXT: lui a0, 11 ; RV64-LMULMAX1-NEXT: addiw a0, a0, 1718 @@ -899,7 +899,7 @@ ; RV32-LMULMAX2: # %bb.0: ; RV32-LMULMAX2-NEXT: lui a0, 748384 ; RV32-LMULMAX2-NEXT: addi a0, a0, 1776 -; RV32-LMULMAX2-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV32-LMULMAX2-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV32-LMULMAX2-NEXT: vmv.s.x v0, a0 ; RV32-LMULMAX2-NEXT: lui a0, 748388 ; RV32-LMULMAX2-NEXT: addi a0, a0, -1793 @@ -916,7 +916,7 @@ ; RV64-LMULMAX2: # %bb.0: ; RV64-LMULMAX2-NEXT: lui a0, 748384 ; RV64-LMULMAX2-NEXT: addiw a0, a0, 1776 -; RV64-LMULMAX2-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV64-LMULMAX2-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV64-LMULMAX2-NEXT: vmv.s.x v0, a0 ; RV64-LMULMAX2-NEXT: lui a0, 748388 ; RV64-LMULMAX2-NEXT: addiw a0, a0, -1793 @@ -934,7 +934,7 @@ ; RV32-LMULMAX4-NEXT: lui a0, %hi(.LCPI21_0) ; RV32-LMULMAX4-NEXT: addi a0, a0, %lo(.LCPI21_0) ; RV32-LMULMAX4-NEXT: li a1, 64 -; RV32-LMULMAX4-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; RV32-LMULMAX4-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; RV32-LMULMAX4-NEXT: vlm.v v0, (a0) ; RV32-LMULMAX4-NEXT: lui a0, %hi(.LCPI21_1) ; RV32-LMULMAX4-NEXT: addi a0, a0, %lo(.LCPI21_1) @@ -945,7 +945,7 @@ ; RV64-LMULMAX4: # %bb.0: ; RV64-LMULMAX4-NEXT: lui a0, %hi(.LCPI21_0) ; RV64-LMULMAX4-NEXT: addi a0, a0, %lo(.LCPI21_0) -; RV64-LMULMAX4-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; 
RV64-LMULMAX4-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-LMULMAX4-NEXT: vlse64.v v0, (a0), zero ; RV64-LMULMAX4-NEXT: lui a0, %hi(.LCPI21_1) ; RV64-LMULMAX4-NEXT: addi a0, a0, %lo(.LCPI21_1) @@ -957,7 +957,7 @@ ; RV32-LMULMAX8-NEXT: lui a0, %hi(.LCPI21_0) ; RV32-LMULMAX8-NEXT: addi a0, a0, %lo(.LCPI21_0) ; RV32-LMULMAX8-NEXT: li a1, 128 -; RV32-LMULMAX8-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; RV32-LMULMAX8-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; RV32-LMULMAX8-NEXT: vlm.v v0, (a0) ; RV32-LMULMAX8-NEXT: ret ; @@ -966,7 +966,7 @@ ; RV64-LMULMAX8-NEXT: lui a0, %hi(.LCPI21_0) ; RV64-LMULMAX8-NEXT: addi a0, a0, %lo(.LCPI21_0) ; RV64-LMULMAX8-NEXT: li a1, 128 -; RV64-LMULMAX8-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; RV64-LMULMAX8-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; RV64-LMULMAX8-NEXT: vlm.v v0, (a0) ; RV64-LMULMAX8-NEXT: ret ; @@ -975,7 +975,7 @@ ; ZVE32F-NEXT: lui a0, %hi(.LCPI21_0) ; ZVE32F-NEXT: addi a0, a0, %lo(.LCPI21_0) ; ZVE32F-NEXT: li a1, 128 -; ZVE32F-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; ZVE32F-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; ZVE32F-NEXT: vlm.v v0, (a0) ; ZVE32F-NEXT: ret ret <128 x i1> diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll @@ -7,15 +7,15 @@ define void @load_store_v1i1(<1 x i1>* %x, <1 x i1>* %y) { ; CHECK-LABEL: load_store_v1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 1, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a1) ; CHECK-NEXT: ret @@ -27,15 +27,15 @@ define void @load_store_v2i1(<2 x i1>* %x, <2 x i1>* %y) { ; CHECK-LABEL: load_store_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a1) ; CHECK-NEXT: ret @@ -47,15 +47,15 @@ define void @load_store_v4i1(<4 x i1>* %x, <4 x i1>* %y) { ; CHECK-LABEL: load_store_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, 
mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a1) ; CHECK-NEXT: ret @@ -67,7 +67,7 @@ define void @load_store_v8i1(<8 x i1>* %x, <8 x i1>* %y) { ; CHECK-LABEL: load_store_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vlm.v v8, (a0) ; CHECK-NEXT: vsm.v v8, (a1) ; CHECK-NEXT: ret @@ -79,7 +79,7 @@ define void @load_store_v16i1(<16 x i1>* %x, <16 x i1>* %y) { ; CHECK-LABEL: load_store_v16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vlm.v v8, (a0) ; CHECK-NEXT: vsm.v v8, (a1) ; CHECK-NEXT: ret @@ -92,7 +92,7 @@ ; LMULMAX2-LABEL: load_store_v32i1: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: li a2, 32 -; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; LMULMAX2-NEXT: vlm.v v8, (a0) ; LMULMAX2-NEXT: vsm.v v8, (a1) ; LMULMAX2-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-logic.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-logic.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-logic.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-logic.ll @@ -7,7 +7,7 @@ define void @and_v8i1(<8 x i1>* %x, <8 x i1>* %y) { ; CHECK-LABEL: and_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vlm.v v8, (a0) ; CHECK-NEXT: vlm.v v9, (a1) ; CHECK-NEXT: vmand.mm v8, v8, v9 @@ -23,7 +23,7 @@ define void @or_v16i1(<16 x i1>* %x, <16 x i1>* %y) { ; CHECK-LABEL: or_v16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vlm.v v8, (a0) ; CHECK-NEXT: vlm.v v9, (a1) ; CHECK-NEXT: vmor.mm v8, v8, v9 @@ -40,7 +40,7 @@ ; CHECK-LABEL: xor_v32i1: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vlm.v v8, (a0) ; CHECK-NEXT: vlm.v v9, (a1) ; CHECK-NEXT: vmxor.mm v8, v8, v9 @@ -57,7 +57,7 @@ ; CHECK-LABEL: not_v64i1: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vlm.v v8, (a0) ; CHECK-NEXT: vmnot.m v8, v8 ; CHECK-NEXT: vsm.v v8, (a0) @@ -72,7 +72,7 @@ define void @andnot_v8i1(<8 x i1>* %x, <8 x i1>* %y) { ; CHECK-LABEL: andnot_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vlm.v v8, (a0) ; CHECK-NEXT: vlm.v v9, (a1) ; CHECK-NEXT: vmandn.mm v8, v9, v8 @@ -89,7 +89,7 @@ define void @ornot_v16i1(<16 x i1>* %x, <16 x i1>* %y) { ; CHECK-LABEL: ornot_v16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vlm.v v8, (a0) ; CHECK-NEXT: vlm.v v9, (a1) ; CHECK-NEXT: vmorn.mm v8, v9, v8 @@ -107,7 +107,7 @@ ; CHECK-LABEL: xornot_v32i1: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vlm.v v8, (a0) ; CHECK-NEXT: vlm.v v9, (a1) ; CHECK-NEXT: vmxnor.mm v8, v8, v9 @@ -124,7 +124,7 @@ define void @nand_v8i1(<8 x i1>* %x, <8 x i1>* %y) { ; CHECK-LABEL: nand_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; 
CHECK-NEXT: vlm.v v8, (a0) ; CHECK-NEXT: vlm.v v9, (a1) ; CHECK-NEXT: vmnand.mm v8, v8, v9 @@ -141,7 +141,7 @@ define void @nor_v16i1(<16 x i1>* %x, <16 x i1>* %y) { ; CHECK-LABEL: nor_v16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vlm.v v8, (a0) ; CHECK-NEXT: vlm.v v9, (a1) ; CHECK-NEXT: vmnor.mm v8, v8, v9 @@ -159,7 +159,7 @@ ; CHECK-LABEL: xnor_v32i1: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vlm.v v8, (a0) ; CHECK-NEXT: vlm.v v9, (a1) ; CHECK-NEXT: vmxnor.mm v8, v8, v9 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll @@ -7,15 +7,15 @@ define void @splat_ones_v1i1(<1 x i1>* %x) { ; CHECK-LABEL: splat_ones_v1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 1, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a0) ; CHECK-NEXT: ret @@ -26,15 +26,15 @@ define void @splat_zeros_v2i1(<2 x i1>* %x) { ; CHECK-LABEL: splat_zeros_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmclr.m v0 ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a0) ; CHECK-NEXT: ret @@ -46,16 +46,16 @@ ; CHECK-LABEL: splat_v1i1: ; CHECK: # %bb.0: ; CHECK-NEXT: andi a1, a1, 1 -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v8, a1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 1, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a0) ; CHECK-NEXT: ret @@ -70,16 +70,16 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a1, a1, a2 ; CHECK-NEXT: seqz a1, a1 -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v8, a1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; 
CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 1, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a0) ; CHECK-NEXT: ret @@ -93,15 +93,15 @@ define void @splat_ones_v4i1(<4 x i1>* %x) { ; CHECK-LABEL: splat_ones_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a0) ; CHECK-NEXT: ret @@ -113,16 +113,16 @@ ; CHECK-LABEL: splat_v4i1: ; CHECK: # %bb.0: ; CHECK-NEXT: andi a1, a1, 1 -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v8, a1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a0) ; CHECK-NEXT: ret @@ -135,7 +135,7 @@ define void @splat_zeros_v8i1(<8 x i1>* %x) { ; CHECK-LABEL: splat_zeros_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmclr.m v8 ; CHECK-NEXT: vsm.v v8, (a0) ; CHECK-NEXT: ret @@ -147,7 +147,7 @@ ; CHECK-LABEL: splat_v8i1: ; CHECK: # %bb.0: ; CHECK-NEXT: andi a1, a1, 1 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v8, a1 ; CHECK-NEXT: vmsne.vi v8, v8, 0 ; CHECK-NEXT: vsm.v v8, (a0) @@ -161,7 +161,7 @@ define void @splat_ones_v16i1(<16 x i1>* %x) { ; CHECK-LABEL: splat_ones_v16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmset.m v8 ; CHECK-NEXT: vsm.v v8, (a0) ; CHECK-NEXT: ret @@ -173,7 +173,7 @@ ; CHECK-LABEL: splat_v16i1: ; CHECK: # %bb.0: ; CHECK-NEXT: andi a1, a1, 1 -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v8, a1 ; CHECK-NEXT: vmsne.vi v8, v8, 0 ; CHECK-NEXT: vsm.v v8, (a0) @@ -188,14 +188,14 @@ ; LMULMAX2-LABEL: splat_zeros_v32i1: ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: li a1, 32 -; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; LMULMAX2-NEXT: vmclr.m v8 ; LMULMAX2-NEXT: vsm.v v8, (a0) ; LMULMAX2-NEXT: ret ; ; LMULMAX1-RV32-LABEL: splat_zeros_v32i1: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmclr.m v8 ; 
LMULMAX1-RV32-NEXT: vsm.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a0, a0, 2 @@ -204,7 +204,7 @@ ; ; LMULMAX1-RV64-LABEL: splat_zeros_v32i1: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV64-NEXT: vmclr.m v8 ; LMULMAX1-RV64-NEXT: vsm.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a0, a0, 2 @@ -219,7 +219,7 @@ ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: andi a1, a1, 1 ; LMULMAX2-NEXT: li a2, 32 -; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; LMULMAX2-NEXT: vmv.v.x v8, a1 ; LMULMAX2-NEXT: vmsne.vi v10, v8, 0 ; LMULMAX2-NEXT: vsm.v v10, (a0) @@ -228,7 +228,7 @@ ; LMULMAX1-RV32-LABEL: splat_v32i1: ; LMULMAX1-RV32: # %bb.0: ; LMULMAX1-RV32-NEXT: andi a1, a1, 1 -; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v8, a1 ; LMULMAX1-RV32-NEXT: vmsne.vi v8, v8, 0 ; LMULMAX1-RV32-NEXT: addi a1, a0, 2 @@ -239,7 +239,7 @@ ; LMULMAX1-RV64-LABEL: splat_v32i1: ; LMULMAX1-RV64: # %bb.0: ; LMULMAX1-RV64-NEXT: andi a1, a1, 1 -; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV64-NEXT: vmv.v.x v8, a1 ; LMULMAX1-RV64-NEXT: vmsne.vi v8, v8, 0 ; LMULMAX1-RV64-NEXT: addi a1, a0, 2 @@ -257,7 +257,7 @@ ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: addi a1, a0, 4 ; LMULMAX2-NEXT: li a2, 32 -; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; LMULMAX2-NEXT: vmset.m v8 ; LMULMAX2-NEXT: vsm.v v8, (a1) ; LMULMAX2-NEXT: vsm.v v8, (a0) @@ -265,7 +265,7 @@ ; ; LMULMAX1-RV32-LABEL: splat_ones_v64i1: ; LMULMAX1-RV32: # %bb.0: -; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmset.m v8 ; LMULMAX1-RV32-NEXT: vsm.v v8, (a0) ; LMULMAX1-RV32-NEXT: addi a1, a0, 6 @@ -278,7 +278,7 @@ ; ; LMULMAX1-RV64-LABEL: splat_ones_v64i1: ; LMULMAX1-RV64: # %bb.0: -; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV64-NEXT: vmset.m v8 ; LMULMAX1-RV64-NEXT: vsm.v v8, (a0) ; LMULMAX1-RV64-NEXT: addi a1, a0, 6 @@ -297,7 +297,7 @@ ; LMULMAX2: # %bb.0: ; LMULMAX2-NEXT: andi a1, a1, 1 ; LMULMAX2-NEXT: li a2, 32 -; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; LMULMAX2-NEXT: vmv.v.x v8, a1 ; LMULMAX2-NEXT: vmsne.vi v10, v8, 0 ; LMULMAX2-NEXT: addi a1, a0, 4 @@ -308,7 +308,7 @@ ; LMULMAX1-RV32-LABEL: splat_v64i1: ; LMULMAX1-RV32: # %bb.0: ; LMULMAX1-RV32-NEXT: andi a1, a1, 1 -; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV32-NEXT: vmv.v.x v8, a1 ; LMULMAX1-RV32-NEXT: vmsne.vi v8, v8, 0 ; LMULMAX1-RV32-NEXT: addi a1, a0, 6 @@ -323,7 +323,7 @@ ; LMULMAX1-RV64-LABEL: splat_v64i1: ; LMULMAX1-RV64: # %bb.0: ; LMULMAX1-RV64-NEXT: andi a1, a1, 1 -; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-RV64-NEXT: vmv.v.x v8, a1 ; LMULMAX1-RV64-NEXT: vmsne.vi v8, v8, 0 ; LMULMAX1-RV64-NEXT: addi a1, a0, 6 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll @@ -34,14 +34,14 @@ ; ; RV64ZVE32F-LABEL: mgather_v1i8: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.v.i v9, 0 ; RV64ZVE32F-NEXT: vmerge.vim v9, v9, 1, v0 ; RV64ZVE32F-NEXT: vmv.x.s a1, v9 ; RV64ZVE32F-NEXT: andi a1, a1, 1 ; RV64ZVE32F-NEXT: beqz a1, .LBB0_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vlse8.v v8, (a0), zero ; RV64ZVE32F-NEXT: .LBB0_2: # %else ; RV64ZVE32F-NEXT: ret @@ -75,7 +75,7 @@ ; ; RV64ZVE32F-LABEL: mgather_v2i8: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: bnez a3, .LBB1_3 @@ -86,15 +86,15 @@ ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB1_3: # %cond.load ; RV64ZVE32F-NEXT: lb a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 ; RV64ZVE32F-NEXT: andi a0, a2, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB1_2 ; RV64ZVE32F-NEXT: .LBB1_4: # %cond.load1 ; RV64ZVE32F-NEXT: lb a0, 0(a1) -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 ; RV64ZVE32F-NEXT: ret %v = call <2 x i8> @llvm.masked.gather.v2i8.v2p0i8(<2 x i8*> %ptrs, i32 1, <2 x i1> %m, <2 x i8> %passthru) @@ -106,7 +106,7 @@ ; RV32V: # %bb.0: ; RV32V-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; RV32V-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32V-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; RV32V-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; RV32V-NEXT: vsext.vf2 v8, v9 ; RV32V-NEXT: ret ; @@ -114,7 +114,7 @@ ; RV64V: # %bb.0: ; RV64V-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; RV64V-NEXT: vluxei64.v v9, (zero), v8, v0.t -; RV64V-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; RV64V-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; RV64V-NEXT: vsext.vf2 v8, v9 ; RV64V-NEXT: ret ; @@ -122,31 +122,31 @@ ; RV32ZVE32F: # %bb.0: ; RV32ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu ; RV32ZVE32F-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV32ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV32ZVE32F-NEXT: vsext.vf2 v8, v9 ; RV32ZVE32F-NEXT: ret ; ; RV64ZVE32F-LABEL: mgather_v2i8_sextload_v2i16: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: beqz a3, .LBB2_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load ; RV64ZVE32F-NEXT: lb a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 ; RV64ZVE32F-NEXT: .LBB2_2: # %else ; RV64ZVE32F-NEXT: andi a0, a2, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB2_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 ; RV64ZVE32F-NEXT: lb a0, 0(a1) -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, 
e8, mf4, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 ; RV64ZVE32F-NEXT: .LBB2_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vsext.vf2 v9, v8 ; RV64ZVE32F-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-NEXT: ret @@ -160,7 +160,7 @@ ; RV32V: # %bb.0: ; RV32V-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; RV32V-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32V-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; RV32V-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; RV32V-NEXT: vzext.vf2 v8, v9 ; RV32V-NEXT: ret ; @@ -168,7 +168,7 @@ ; RV64V: # %bb.0: ; RV64V-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; RV64V-NEXT: vluxei64.v v9, (zero), v8, v0.t -; RV64V-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; RV64V-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; RV64V-NEXT: vzext.vf2 v8, v9 ; RV64V-NEXT: ret ; @@ -176,31 +176,31 @@ ; RV32ZVE32F: # %bb.0: ; RV32ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu ; RV32ZVE32F-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV32ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV32ZVE32F-NEXT: vzext.vf2 v8, v9 ; RV32ZVE32F-NEXT: ret ; ; RV64ZVE32F-LABEL: mgather_v2i8_zextload_v2i16: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: beqz a3, .LBB3_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load ; RV64ZVE32F-NEXT: lb a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 ; RV64ZVE32F-NEXT: .LBB3_2: # %else ; RV64ZVE32F-NEXT: andi a0, a2, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB3_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 ; RV64ZVE32F-NEXT: lb a0, 0(a1) -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 ; RV64ZVE32F-NEXT: .LBB3_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vzext.vf2 v9, v8 ; RV64ZVE32F-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-NEXT: ret @@ -214,7 +214,7 @@ ; RV32V: # %bb.0: ; RV32V-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; RV32V-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32V-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; RV32V-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; RV32V-NEXT: vsext.vf4 v8, v9 ; RV32V-NEXT: ret ; @@ -222,7 +222,7 @@ ; RV64V: # %bb.0: ; RV64V-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; RV64V-NEXT: vluxei64.v v9, (zero), v8, v0.t -; RV64V-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; RV64V-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; RV64V-NEXT: vsext.vf4 v8, v9 ; RV64V-NEXT: ret ; @@ -230,31 +230,31 @@ ; RV32ZVE32F: # %bb.0: ; RV32ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu ; RV32ZVE32F-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vsext.vf4 v8, v9 ; RV32ZVE32F-NEXT: ret ; ; RV64ZVE32F-LABEL: mgather_v2i8_sextload_v2i32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; 
RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: beqz a3, .LBB4_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load ; RV64ZVE32F-NEXT: lb a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 ; RV64ZVE32F-NEXT: .LBB4_2: # %else ; RV64ZVE32F-NEXT: andi a0, a2, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB4_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 ; RV64ZVE32F-NEXT: lb a0, 0(a1) -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 ; RV64ZVE32F-NEXT: .LBB4_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vsext.vf4 v9, v8 ; RV64ZVE32F-NEXT: vmv.v.v v8, v9 ; RV64ZVE32F-NEXT: ret @@ -268,7 +268,7 @@ ; RV32V: # %bb.0: ; RV32V-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; RV32V-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32V-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; RV32V-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; RV32V-NEXT: vzext.vf4 v8, v9 ; RV32V-NEXT: ret ; @@ -276,7 +276,7 @@ ; RV64V: # %bb.0: ; RV64V-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; RV64V-NEXT: vluxei64.v v9, (zero), v8, v0.t -; RV64V-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; RV64V-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; RV64V-NEXT: vzext.vf4 v8, v9 ; RV64V-NEXT: ret ; @@ -284,31 +284,31 @@ ; RV32ZVE32F: # %bb.0: ; RV32ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu ; RV32ZVE32F-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vzext.vf4 v8, v9 ; RV32ZVE32F-NEXT: ret ; ; RV64ZVE32F-LABEL: mgather_v2i8_zextload_v2i32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: beqz a3, .LBB5_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load ; RV64ZVE32F-NEXT: lb a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 ; RV64ZVE32F-NEXT: .LBB5_2: # %else ; RV64ZVE32F-NEXT: andi a0, a2, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB5_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 ; RV64ZVE32F-NEXT: lb a0, 0(a1) -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 ; RV64ZVE32F-NEXT: .LBB5_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vzext.vf4 v9, v8 ; RV64ZVE32F-NEXT: vmv.v.v v8, v9 ; RV64ZVE32F-NEXT: ret @@ -322,7 +322,7 @@ ; RV32V: # %bb.0: ; RV32V-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; RV32V-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32V-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32V-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV32V-NEXT: vsext.vf8 v8, v9 ; RV32V-NEXT: ret ; @@ -330,7 +330,7 @@ ; RV64V: # %bb.0: ; RV64V-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; RV64V-NEXT: vluxei64.v v9, (zero), v8, v0.t 
-; RV64V-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64V-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV64V-NEXT: vsext.vf8 v8, v9 ; RV64V-NEXT: ret ; @@ -338,7 +338,7 @@ ; RV32ZVE32F: # %bb.0: ; RV32ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu ; RV32ZVE32F-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v9, 1 ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: srai a2, a1, 31 @@ -352,25 +352,25 @@ ; ; RV64ZVE32F-LABEL: mgather_v2i8_sextload_v2i64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: beqz a3, .LBB6_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load ; RV64ZVE32F-NEXT: lb a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 ; RV64ZVE32F-NEXT: .LBB6_2: # %else ; RV64ZVE32F-NEXT: andi a0, a2, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB6_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 ; RV64ZVE32F-NEXT: lb a0, 0(a1) -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 ; RV64ZVE32F-NEXT: .LBB6_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v9 ; RV64ZVE32F-NEXT: vmv.x.s a0, v8 @@ -385,7 +385,7 @@ ; RV32V: # %bb.0: ; RV32V-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; RV32V-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32V-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32V-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV32V-NEXT: vzext.vf8 v8, v9 ; RV32V-NEXT: ret ; @@ -393,7 +393,7 @@ ; RV64V: # %bb.0: ; RV64V-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; RV64V-NEXT: vluxei64.v v9, (zero), v8, v0.t -; RV64V-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64V-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV64V-NEXT: vzext.vf8 v8, v9 ; RV64V-NEXT: ret ; @@ -401,7 +401,7 @@ ; RV32ZVE32F: # %bb.0: ; RV32ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu ; RV32ZVE32F-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v9, 1 ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: andi a1, a1, 255 @@ -415,25 +415,25 @@ ; ; RV64ZVE32F-LABEL: mgather_v2i8_zextload_v2i64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: beqz a3, .LBB7_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load ; RV64ZVE32F-NEXT: lb a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 ; RV64ZVE32F-NEXT: .LBB7_2: # %else ; RV64ZVE32F-NEXT: andi a0, a2, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB7_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 ; RV64ZVE32F-NEXT: lb a0, 0(a1) -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, 
a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 ; RV64ZVE32F-NEXT: .LBB7_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a0, v9 ; RV64ZVE32F-NEXT: andi a1, a0, 255 @@ -464,7 +464,7 @@ ; ; RV64ZVE32F-LABEL: mgather_v4i8: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: bnez a2, .LBB8_5 @@ -482,34 +482,34 @@ ; RV64ZVE32F-NEXT: .LBB8_5: # %cond.load ; RV64ZVE32F-NEXT: ld a2, 0(a0) ; RV64ZVE32F-NEXT: lb a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a2 ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB8_2 ; RV64ZVE32F-NEXT: .LBB8_6: # %cond.load1 ; RV64ZVE32F-NEXT: ld a2, 8(a0) ; RV64ZVE32F-NEXT: lb a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: beqz a2, .LBB8_3 ; RV64ZVE32F-NEXT: .LBB8_7: # %cond.load4 ; RV64ZVE32F-NEXT: ld a2, 16(a0) ; RV64ZVE32F-NEXT: lb a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e8, mf4, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e8, mf4, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 2 ; RV64ZVE32F-NEXT: andi a1, a1, 8 ; RV64ZVE32F-NEXT: beqz a1, .LBB8_4 ; RV64ZVE32F-NEXT: .LBB8_8: # %cond.load7 ; RV64ZVE32F-NEXT: ld a0, 24(a0) ; RV64ZVE32F-NEXT: lb a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 3 ; RV64ZVE32F-NEXT: ret %v = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %ptrs, i32 1, <4 x i1> %m, <4 x i8> %passthru) @@ -519,21 +519,21 @@ define <4 x i8> @mgather_truemask_v4i8(<4 x i8*> %ptrs, <4 x i8> %passthru) { ; RV32-LABEL: mgather_truemask_v4i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; RV32-NEXT: vluxei32.v v9, (zero), v8 ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64V-LABEL: mgather_truemask_v4i8: ; RV64V: # %bb.0: -; RV64V-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; RV64V-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; RV64V-NEXT: vluxei64.v v10, (zero), v8 ; RV64V-NEXT: vmv1r.v v8, v10 ; RV64V-NEXT: ret ; ; RV64ZVE32F-LABEL: mgather_truemask_v4i8: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vmset.m v9 ; RV64ZVE32F-NEXT: vmv.x.s a1, v9 ; RV64ZVE32F-NEXT: beqz zero, .LBB9_5 @@ -551,34 +551,34 @@ ; RV64ZVE32F-NEXT: .LBB9_5: # %cond.load ; RV64ZVE32F-NEXT: ld a2, 0(a0) ; RV64ZVE32F-NEXT: lb a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, tu, mu +; 
RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a2 ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB9_2 ; RV64ZVE32F-NEXT: .LBB9_6: # %cond.load1 ; RV64ZVE32F-NEXT: ld a2, 8(a0) ; RV64ZVE32F-NEXT: lb a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: beqz a2, .LBB9_3 ; RV64ZVE32F-NEXT: .LBB9_7: # %cond.load4 ; RV64ZVE32F-NEXT: ld a2, 16(a0) ; RV64ZVE32F-NEXT: lb a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e8, mf4, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e8, mf4, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 2 ; RV64ZVE32F-NEXT: andi a1, a1, 8 ; RV64ZVE32F-NEXT: beqz a1, .LBB9_4 ; RV64ZVE32F-NEXT: .LBB9_8: # %cond.load7 ; RV64ZVE32F-NEXT: ld a0, 24(a0) ; RV64ZVE32F-NEXT: lb a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 3 ; RV64ZVE32F-NEXT: ret %mhead = insertelement <4 x i1> poison, i1 1, i32 0 @@ -624,7 +624,7 @@ ; ; RV64ZVE32F-LABEL: mgather_v8i8: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: bnez a2, .LBB11_9 @@ -654,70 +654,70 @@ ; RV64ZVE32F-NEXT: .LBB11_9: # %cond.load ; RV64ZVE32F-NEXT: ld a2, 0(a0) ; RV64ZVE32F-NEXT: lb a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a2 ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB11_2 ; RV64ZVE32F-NEXT: .LBB11_10: # %cond.load1 ; RV64ZVE32F-NEXT: ld a2, 8(a0) ; RV64ZVE32F-NEXT: lb a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: beqz a2, .LBB11_3 ; RV64ZVE32F-NEXT: .LBB11_11: # %cond.load4 ; RV64ZVE32F-NEXT: ld a2, 16(a0) ; RV64ZVE32F-NEXT: lb a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e8, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e8, mf2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 2 ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: beqz a2, .LBB11_4 ; RV64ZVE32F-NEXT: .LBB11_12: # %cond.load7 ; RV64ZVE32F-NEXT: ld a2, 24(a0) ; RV64ZVE32F-NEXT: lb a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 3 ; 
RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB11_5 ; RV64ZVE32F-NEXT: .LBB11_13: # %cond.load10 ; RV64ZVE32F-NEXT: ld a2, 32(a0) ; RV64ZVE32F-NEXT: lb a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 5, e8, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 5, e8, mf2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 4 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB11_6 ; RV64ZVE32F-NEXT: .LBB11_14: # %cond.load13 ; RV64ZVE32F-NEXT: ld a2, 40(a0) ; RV64ZVE32F-NEXT: lb a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 6, e8, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 6, e8, mf2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 5 ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: beqz a2, .LBB11_7 ; RV64ZVE32F-NEXT: .LBB11_15: # %cond.load16 ; RV64ZVE32F-NEXT: ld a2, 48(a0) ; RV64ZVE32F-NEXT: lb a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e8, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 7, e8, mf2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 6 ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB11_8 ; RV64ZVE32F-NEXT: .LBB11_16: # %cond.load19 ; RV64ZVE32F-NEXT: ld a0, 56(a0) ; RV64ZVE32F-NEXT: lb a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 7 ; RV64ZVE32F-NEXT: ret %v = call <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*> %ptrs, i32 1, <8 x i1> %m, <8 x i8> %passthru) @@ -727,7 +727,7 @@ define <8 x i8> @mgather_baseidx_v8i8(i8* %base, <8 x i8> %idxs, <8 x i1> %m, <8 x i8> %passthru) { ; RV32-LABEL: mgather_baseidx_v8i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v10, v8 ; RV32-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (a0), v10, v0.t @@ -736,7 +736,7 @@ ; ; RV64V-LABEL: mgather_baseidx_v8i8: ; RV64V: # %bb.0: -; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64V-NEXT: vsext.vf8 v12, v8 ; RV64V-NEXT: vsetvli zero, zero, e8, mf2, ta, mu ; RV64V-NEXT: vluxei64.v v9, (a0), v12, v0.t @@ -745,7 +745,7 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_v8i8: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB12_2 @@ -753,22 +753,22 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 ; RV64ZVE32F-NEXT: .LBB12_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB12_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; 
RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 1 ; RV64ZVE32F-NEXT: .LBB12_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB12_6 @@ -777,10 +777,10 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v11, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e8, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e8, mf2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 2 ; RV64ZVE32F-NEXT: .LBB12_6: # %else5 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB12_13 @@ -791,16 +791,16 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB12_10 ; RV64ZVE32F-NEXT: .LBB12_9: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 6, e8, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 6, e8, mf2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 5 ; RV64ZVE32F-NEXT: .LBB12_10: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB12_15 @@ -811,24 +811,24 @@ ; RV64ZVE32F-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB12_13: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB12_8 ; RV64ZVE32F-NEXT: .LBB12_14: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 5, e8, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 5, e8, mf2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 4 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB12_9 @@ -838,18 +838,18 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e8, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 7, e8, mf2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6 ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, 
.LBB12_12 ; RV64ZVE32F-NEXT: .LBB12_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v8 ; RV64ZVE32F-NEXT: add a0, a0, a1 ; RV64ZVE32F-NEXT: lb a0, 0(a0) ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7 ; RV64ZVE32F-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-NEXT: ret @@ -884,14 +884,14 @@ ; ; RV64ZVE32F-LABEL: mgather_v1i16: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.v.i v9, 0 ; RV64ZVE32F-NEXT: vmerge.vim v9, v9, 1, v0 ; RV64ZVE32F-NEXT: vmv.x.s a1, v9 ; RV64ZVE32F-NEXT: andi a1, a1, 1 ; RV64ZVE32F-NEXT: beqz a1, .LBB13_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vlse16.v v8, (a0), zero ; RV64ZVE32F-NEXT: .LBB13_2: # %else ; RV64ZVE32F-NEXT: ret @@ -925,7 +925,7 @@ ; ; RV64ZVE32F-LABEL: mgather_v2i16: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: bnez a3, .LBB14_3 @@ -936,15 +936,15 @@ ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB14_3: # %cond.load ; RV64ZVE32F-NEXT: lh a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 ; RV64ZVE32F-NEXT: andi a0, a2, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB14_2 ; RV64ZVE32F-NEXT: .LBB14_4: # %cond.load1 ; RV64ZVE32F-NEXT: lh a0, 0(a1) -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 ; RV64ZVE32F-NEXT: ret %v = call <2 x i16> @llvm.masked.gather.v2i16.v2p0i16(<2 x i16*> %ptrs, i32 2, <2 x i1> %m, <2 x i16> %passthru) @@ -956,7 +956,7 @@ ; RV32V: # %bb.0: ; RV32V-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV32V-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32V-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; RV32V-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; RV32V-NEXT: vsext.vf2 v8, v9 ; RV32V-NEXT: ret ; @@ -964,7 +964,7 @@ ; RV64V: # %bb.0: ; RV64V-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV64V-NEXT: vluxei64.v v9, (zero), v8, v0.t -; RV64V-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; RV64V-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; RV64V-NEXT: vsext.vf2 v8, v9 ; RV64V-NEXT: ret ; @@ -972,31 +972,31 @@ ; RV32ZVE32F: # %bb.0: ; RV32ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu ; RV32ZVE32F-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vsext.vf2 v8, v9 ; RV32ZVE32F-NEXT: ret ; ; RV64ZVE32F-LABEL: mgather_v2i16_sextload_v2i32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: beqz a3, .LBB15_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load ; RV64ZVE32F-NEXT: 
lh a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 ; RV64ZVE32F-NEXT: .LBB15_2: # %else ; RV64ZVE32F-NEXT: andi a0, a2, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB15_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 ; RV64ZVE32F-NEXT: lh a0, 0(a1) -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 ; RV64ZVE32F-NEXT: .LBB15_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vsext.vf2 v9, v8 ; RV64ZVE32F-NEXT: vmv.v.v v8, v9 ; RV64ZVE32F-NEXT: ret @@ -1010,7 +1010,7 @@ ; RV32V: # %bb.0: ; RV32V-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV32V-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32V-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; RV32V-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; RV32V-NEXT: vzext.vf2 v8, v9 ; RV32V-NEXT: ret ; @@ -1018,7 +1018,7 @@ ; RV64V: # %bb.0: ; RV64V-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV64V-NEXT: vluxei64.v v9, (zero), v8, v0.t -; RV64V-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; RV64V-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; RV64V-NEXT: vzext.vf2 v8, v9 ; RV64V-NEXT: ret ; @@ -1026,31 +1026,31 @@ ; RV32ZVE32F: # %bb.0: ; RV32ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu ; RV32ZVE32F-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vzext.vf2 v8, v9 ; RV32ZVE32F-NEXT: ret ; ; RV64ZVE32F-LABEL: mgather_v2i16_zextload_v2i32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: beqz a3, .LBB16_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load ; RV64ZVE32F-NEXT: lh a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 ; RV64ZVE32F-NEXT: .LBB16_2: # %else ; RV64ZVE32F-NEXT: andi a0, a2, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB16_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 ; RV64ZVE32F-NEXT: lh a0, 0(a1) -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 ; RV64ZVE32F-NEXT: .LBB16_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vzext.vf2 v9, v8 ; RV64ZVE32F-NEXT: vmv.v.v v8, v9 ; RV64ZVE32F-NEXT: ret @@ -1064,7 +1064,7 @@ ; RV32V: # %bb.0: ; RV32V-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV32V-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32V-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32V-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV32V-NEXT: vsext.vf4 v8, v9 ; RV32V-NEXT: ret ; @@ -1072,7 +1072,7 @@ ; RV64V: # %bb.0: ; RV64V-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV64V-NEXT: vluxei64.v v9, (zero), v8, v0.t -; RV64V-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64V-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV64V-NEXT: vsext.vf4 
v8, v9 ; RV64V-NEXT: ret ; @@ -1080,7 +1080,7 @@ ; RV32ZVE32F: # %bb.0: ; RV32ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu ; RV32ZVE32F-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v9, 1 ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: srai a2, a1, 31 @@ -1094,25 +1094,25 @@ ; ; RV64ZVE32F-LABEL: mgather_v2i16_sextload_v2i64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: beqz a3, .LBB17_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load ; RV64ZVE32F-NEXT: lh a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 ; RV64ZVE32F-NEXT: .LBB17_2: # %else ; RV64ZVE32F-NEXT: andi a0, a2, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB17_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 ; RV64ZVE32F-NEXT: lh a0, 0(a1) -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 ; RV64ZVE32F-NEXT: .LBB17_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v9 ; RV64ZVE32F-NEXT: vmv.x.s a0, v8 @@ -1127,7 +1127,7 @@ ; RV32V: # %bb.0: ; RV32V-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV32V-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32V-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32V-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV32V-NEXT: vzext.vf4 v8, v9 ; RV32V-NEXT: ret ; @@ -1135,7 +1135,7 @@ ; RV64V: # %bb.0: ; RV64V-NEXT: vsetivli zero, 2, e16, mf4, ta, mu ; RV64V-NEXT: vluxei64.v v9, (zero), v8, v0.t -; RV64V-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64V-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV64V-NEXT: vzext.vf4 v8, v9 ; RV64V-NEXT: ret ; @@ -1143,7 +1143,7 @@ ; RV32ZVE32F: # %bb.0: ; RV32ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu ; RV32ZVE32F-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v9, 1 ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: lui a2, 16 @@ -1159,30 +1159,30 @@ ; ; RV64ZVE32F-LABEL: mgather_v2i16_zextload_v2i64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: beqz a3, .LBB18_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load ; RV64ZVE32F-NEXT: lh a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 ; RV64ZVE32F-NEXT: .LBB18_2: # %else ; RV64ZVE32F-NEXT: andi a0, a2, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB18_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 ; RV64ZVE32F-NEXT: lh a0, 0(a1) -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetvli 
zero, zero, e16, mf2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 ; RV64ZVE32F-NEXT: .LBB18_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a0, v8 ; RV64ZVE32F-NEXT: lui a1, 16 ; RV64ZVE32F-NEXT: addiw a1, a1, -1 ; RV64ZVE32F-NEXT: and a0, a0, a1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: and a1, a2, a1 @@ -1211,7 +1211,7 @@ ; ; RV64ZVE32F-LABEL: mgather_v4i16: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: bnez a2, .LBB19_5 @@ -1229,34 +1229,34 @@ ; RV64ZVE32F-NEXT: .LBB19_5: # %cond.load ; RV64ZVE32F-NEXT: ld a2, 0(a0) ; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a2 ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB19_2 ; RV64ZVE32F-NEXT: .LBB19_6: # %cond.load1 ; RV64ZVE32F-NEXT: ld a2, 8(a0) ; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: beqz a2, .LBB19_3 ; RV64ZVE32F-NEXT: .LBB19_7: # %cond.load4 ; RV64ZVE32F-NEXT: ld a2, 16(a0) ; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, mf2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 2 ; RV64ZVE32F-NEXT: andi a1, a1, 8 ; RV64ZVE32F-NEXT: beqz a1, .LBB19_4 ; RV64ZVE32F-NEXT: .LBB19_8: # %cond.load7 ; RV64ZVE32F-NEXT: ld a0, 24(a0) ; RV64ZVE32F-NEXT: lh a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 3 ; RV64ZVE32F-NEXT: ret %v = call <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*> %ptrs, i32 2, <4 x i1> %m, <4 x i16> %passthru) @@ -1266,21 +1266,21 @@ define <4 x i16> @mgather_truemask_v4i16(<4 x i16*> %ptrs, <4 x i16> %passthru) { ; RV32-LABEL: mgather_truemask_v4i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV32-NEXT: vluxei32.v v9, (zero), v8 ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64V-LABEL: mgather_truemask_v4i16: ; RV64V: # %bb.0: -; RV64V-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV64V-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV64V-NEXT: vluxei64.v v10, (zero), v8 ; RV64V-NEXT: vmv1r.v v8, v10 ; RV64V-NEXT: ret ; ; RV64ZVE32F-LABEL: mgather_truemask_v4i16: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vmset.m v9 ; RV64ZVE32F-NEXT: vmv.x.s a1, v9 ; RV64ZVE32F-NEXT: 
beqz zero, .LBB20_5 @@ -1298,34 +1298,34 @@ ; RV64ZVE32F-NEXT: .LBB20_5: # %cond.load ; RV64ZVE32F-NEXT: ld a2, 0(a0) ; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a2 ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB20_2 ; RV64ZVE32F-NEXT: .LBB20_6: # %cond.load1 ; RV64ZVE32F-NEXT: ld a2, 8(a0) ; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: beqz a2, .LBB20_3 ; RV64ZVE32F-NEXT: .LBB20_7: # %cond.load4 ; RV64ZVE32F-NEXT: ld a2, 16(a0) ; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, mf2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 2 ; RV64ZVE32F-NEXT: andi a1, a1, 8 ; RV64ZVE32F-NEXT: beqz a1, .LBB20_4 ; RV64ZVE32F-NEXT: .LBB20_8: # %cond.load7 ; RV64ZVE32F-NEXT: ld a0, 24(a0) ; RV64ZVE32F-NEXT: lh a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 3 ; RV64ZVE32F-NEXT: ret %mhead = insertelement <4 x i1> poison, i1 1, i32 0 @@ -1371,7 +1371,7 @@ ; ; RV64ZVE32F-LABEL: mgather_v8i16: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: bnez a2, .LBB22_9 @@ -1401,70 +1401,70 @@ ; RV64ZVE32F-NEXT: .LBB22_9: # %cond.load ; RV64ZVE32F-NEXT: ld a2, 0(a0) ; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a2 ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB22_2 ; RV64ZVE32F-NEXT: .LBB22_10: # %cond.load1 ; RV64ZVE32F-NEXT: ld a2, 8(a0) ; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: beqz a2, .LBB22_3 ; RV64ZVE32F-NEXT: .LBB22_11: # %cond.load4 ; RV64ZVE32F-NEXT: ld a2, 16(a0) ; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 2 ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: beqz a2, .LBB22_4 ; RV64ZVE32F-NEXT: .LBB22_12: # %cond.load7 ; RV64ZVE32F-NEXT: ld a2, 24(a0) ; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; 
RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB22_5 ; RV64ZVE32F-NEXT: .LBB22_13: # %cond.load10 ; RV64ZVE32F-NEXT: ld a2, 32(a0) ; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 4 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB22_6 ; RV64ZVE32F-NEXT: .LBB22_14: # %cond.load13 ; RV64ZVE32F-NEXT: ld a2, 40(a0) ; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 5 ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: beqz a2, .LBB22_7 ; RV64ZVE32F-NEXT: .LBB22_15: # %cond.load16 ; RV64ZVE32F-NEXT: ld a2, 48(a0) ; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 6 ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB22_8 ; RV64ZVE32F-NEXT: .LBB22_16: # %cond.load19 ; RV64ZVE32F-NEXT: ld a0, 56(a0) ; RV64ZVE32F-NEXT: lh a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 7 ; RV64ZVE32F-NEXT: ret %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, i32 2, <8 x i1> %m, <8 x i16> %passthru) @@ -1474,7 +1474,7 @@ define <8 x i16> @mgather_baseidx_v8i8_v8i16(i16* %base, <8 x i8> %idxs, <8 x i1> %m, <8 x i16> %passthru) { ; RV32-LABEL: mgather_baseidx_v8i8_v8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v10, v8 ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu @@ -1484,7 +1484,7 @@ ; ; RV64V-LABEL: mgather_baseidx_v8i8_v8i16: ; RV64V: # %bb.0: -; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64V-NEXT: vsext.vf8 v12, v8 ; RV64V-NEXT: vadd.vv v12, v12, v12 ; RV64V-NEXT: vsetvli zero, zero, e16, m1, ta, mu @@ -1494,7 +1494,7 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_v8i8_v8i16: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB23_2 @@ -1503,24 +1503,24 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 ; 
RV64ZVE32F-NEXT: .LBB23_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB23_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 1 ; RV64ZVE32F-NEXT: .LBB23_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB23_6 @@ -1529,12 +1529,12 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v11, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 2 ; RV64ZVE32F-NEXT: .LBB23_6: # %else5 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB23_13 @@ -1545,18 +1545,18 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB23_10 ; RV64ZVE32F-NEXT: .LBB23_9: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 5 ; RV64ZVE32F-NEXT: .LBB23_10: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB23_15 @@ -1567,27 +1567,27 @@ ; RV64ZVE32F-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB23_13: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB23_8 ; RV64ZVE32F-NEXT: .LBB23_14: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: 
vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 4 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB23_9 @@ -1597,22 +1597,22 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6 ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB23_12 ; RV64ZVE32F-NEXT: .LBB23_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v8 ; RV64ZVE32F-NEXT: slli a1, a1, 1 ; RV64ZVE32F-NEXT: add a0, a0, a1 ; RV64ZVE32F-NEXT: lh a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7 ; RV64ZVE32F-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-NEXT: ret @@ -1624,7 +1624,7 @@ define <8 x i16> @mgather_baseidx_sext_v8i8_v8i16(i16* %base, <8 x i8> %idxs, <8 x i1> %m, <8 x i16> %passthru) { ; RV32-LABEL: mgather_baseidx_sext_v8i8_v8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v10, v8 ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu @@ -1634,7 +1634,7 @@ ; ; RV64V-LABEL: mgather_baseidx_sext_v8i8_v8i16: ; RV64V: # %bb.0: -; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64V-NEXT: vsext.vf8 v12, v8 ; RV64V-NEXT: vadd.vv v12, v12, v12 ; RV64V-NEXT: vsetvli zero, zero, e16, m1, ta, mu @@ -1644,7 +1644,7 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_sext_v8i8_v8i16: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB24_2 @@ -1653,24 +1653,24 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 ; RV64ZVE32F-NEXT: .LBB24_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB24_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; 
RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 1 ; RV64ZVE32F-NEXT: .LBB24_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB24_6 @@ -1679,12 +1679,12 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v11, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 2 ; RV64ZVE32F-NEXT: .LBB24_6: # %else5 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB24_13 @@ -1695,18 +1695,18 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB24_10 ; RV64ZVE32F-NEXT: .LBB24_9: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 5 ; RV64ZVE32F-NEXT: .LBB24_10: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB24_15 @@ -1717,27 +1717,27 @@ ; RV64ZVE32F-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB24_13: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB24_8 ; RV64ZVE32F-NEXT: .LBB24_14: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 4 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB24_9 @@ -1747,22 
+1747,22 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6 ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB24_12 ; RV64ZVE32F-NEXT: .LBB24_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v8 ; RV64ZVE32F-NEXT: slli a1, a1, 1 ; RV64ZVE32F-NEXT: add a0, a0, a1 ; RV64ZVE32F-NEXT: lh a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7 ; RV64ZVE32F-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-NEXT: ret @@ -1775,7 +1775,7 @@ define <8 x i16> @mgather_baseidx_zext_v8i8_v8i16(i16* %base, <8 x i8> %idxs, <8 x i1> %m, <8 x i16> %passthru) { ; RV32-LABEL: mgather_baseidx_zext_v8i8_v8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vzext.vf4 v10, v8 ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu @@ -1785,7 +1785,7 @@ ; ; RV64V-LABEL: mgather_baseidx_zext_v8i8_v8i16: ; RV64V: # %bb.0: -; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64V-NEXT: vzext.vf8 v12, v8 ; RV64V-NEXT: vadd.vv v12, v12, v12 ; RV64V-NEXT: vsetvli zero, zero, e16, m1, ta, mu @@ -1795,7 +1795,7 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_zext_v8i8_v8i16: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB25_2 @@ -1805,25 +1805,25 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 ; RV64ZVE32F-NEXT: .LBB25_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB25_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 1 ; RV64ZVE32F-NEXT: .LBB25_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB25_6 @@ -1833,12 +1833,12 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; 
RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v11, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 2 ; RV64ZVE32F-NEXT: .LBB25_6: # %else5 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB25_13 @@ -1849,19 +1849,19 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB25_10 ; RV64ZVE32F-NEXT: .LBB25_9: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 5 ; RV64ZVE32F-NEXT: .LBB25_10: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB25_15 @@ -1872,29 +1872,29 @@ ; RV64ZVE32F-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB25_13: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB25_8 ; RV64ZVE32F-NEXT: .LBB25_14: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 4 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB25_9 @@ -1905,23 +1905,23 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, 
v10, 6 ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB25_12 ; RV64ZVE32F-NEXT: .LBB25_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v8 ; RV64ZVE32F-NEXT: andi a1, a1, 255 ; RV64ZVE32F-NEXT: slli a1, a1, 1 ; RV64ZVE32F-NEXT: add a0, a0, a1 ; RV64ZVE32F-NEXT: lh a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7 ; RV64ZVE32F-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-NEXT: ret @@ -1934,7 +1934,7 @@ define <8 x i16> @mgather_baseidx_v8i16(i16* %base, <8 x i16> %idxs, <8 x i1> %m, <8 x i16> %passthru) { ; RV32-LABEL: mgather_baseidx_v8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf2 v10, v8 ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu @@ -1944,7 +1944,7 @@ ; ; RV64V-LABEL: mgather_baseidx_v8i16: ; RV64V: # %bb.0: -; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64V-NEXT: vsext.vf4 v12, v8 ; RV64V-NEXT: vadd.vv v12, v12, v12 ; RV64V-NEXT: vsetvli zero, zero, e16, m1, ta, mu @@ -1954,33 +1954,33 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_v8i16: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB26_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 ; RV64ZVE32F-NEXT: .LBB26_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB26_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lh a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 1 ; RV64ZVE32F-NEXT: .LBB26_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB26_6 @@ -1990,10 +1990,10 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lh a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v11, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 2 ; RV64ZVE32F-NEXT: .LBB26_6: # %else5 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; RV64ZVE32F-NEXT: 
andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB26_13 @@ -2004,17 +2004,17 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB26_10 ; RV64ZVE32F-NEXT: .LBB26_9: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lh a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 5 ; RV64ZVE32F-NEXT: .LBB26_10: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB26_15 @@ -2025,26 +2025,26 @@ ; RV64ZVE32F-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB26_13: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lh a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB26_8 ; RV64ZVE32F-NEXT: .LBB26_14: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lh a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 4 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB26_9 @@ -2055,19 +2055,19 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lh a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6 ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB26_12 ; RV64ZVE32F-NEXT: .LBB26_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v8 ; RV64ZVE32F-NEXT: slli a1, a1, 1 ; RV64ZVE32F-NEXT: add a0, a0, a1 ; RV64ZVE32F-NEXT: lh a0, 0(a0) ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7 ; RV64ZVE32F-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-NEXT: ret @@ -2102,14 +2102,14 @@ ; ; RV64ZVE32F-LABEL: mgather_v1i32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.v.i v9, 0 ; RV64ZVE32F-NEXT: vmerge.vim v9, v9, 1, v0 ; RV64ZVE32F-NEXT: vmv.x.s a1, v9 ; RV64ZVE32F-NEXT: andi 
a1, a1, 1 ; RV64ZVE32F-NEXT: beqz a1, .LBB27_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vlse32.v v8, (a0), zero ; RV64ZVE32F-NEXT: .LBB27_2: # %else ; RV64ZVE32F-NEXT: ret @@ -2143,7 +2143,7 @@ ; ; RV64ZVE32F-LABEL: mgather_v2i32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: bnez a3, .LBB28_3 @@ -2154,15 +2154,15 @@ ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB28_3: # %cond.load ; RV64ZVE32F-NEXT: lw a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 ; RV64ZVE32F-NEXT: andi a0, a2, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB28_2 ; RV64ZVE32F-NEXT: .LBB28_4: # %cond.load1 ; RV64ZVE32F-NEXT: lw a0, 0(a1) -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 ; RV64ZVE32F-NEXT: ret %v = call <2 x i32> @llvm.masked.gather.v2i32.v2p0i32(<2 x i32*> %ptrs, i32 4, <2 x i1> %m, <2 x i32> %passthru) @@ -2174,7 +2174,7 @@ ; RV32V: # %bb.0: ; RV32V-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; RV32V-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32V-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32V-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV32V-NEXT: vsext.vf2 v8, v9 ; RV32V-NEXT: ret ; @@ -2182,7 +2182,7 @@ ; RV64V: # %bb.0: ; RV64V-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; RV64V-NEXT: vluxei64.v v9, (zero), v8, v0.t -; RV64V-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64V-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV64V-NEXT: vsext.vf2 v8, v9 ; RV64V-NEXT: ret ; @@ -2190,7 +2190,7 @@ ; RV32ZVE32F: # %bb.0: ; RV32ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu ; RV32ZVE32F-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v9, 1 ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: srai a1, a1, 31 @@ -2205,25 +2205,25 @@ ; ; RV64ZVE32F-LABEL: mgather_v2i32_sextload_v2i64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: beqz a3, .LBB29_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load ; RV64ZVE32F-NEXT: lw a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 ; RV64ZVE32F-NEXT: .LBB29_2: # %else ; RV64ZVE32F-NEXT: andi a0, a2, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB29_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 ; RV64ZVE32F-NEXT: lw a0, 0(a1) -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 ; RV64ZVE32F-NEXT: .LBB29_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: 
vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v9 ; RV64ZVE32F-NEXT: vmv.x.s a0, v8 @@ -2238,7 +2238,7 @@ ; RV32V: # %bb.0: ; RV32V-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; RV32V-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32V-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32V-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV32V-NEXT: vzext.vf2 v8, v9 ; RV32V-NEXT: ret ; @@ -2246,7 +2246,7 @@ ; RV64V: # %bb.0: ; RV64V-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; RV64V-NEXT: vluxei64.v v9, (zero), v8, v0.t -; RV64V-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64V-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV64V-NEXT: vzext.vf2 v8, v9 ; RV64V-NEXT: ret ; @@ -2254,7 +2254,7 @@ ; RV32ZVE32F: # %bb.0: ; RV32ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu ; RV32ZVE32F-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v9, 1 ; RV32ZVE32F-NEXT: sw zero, 12(a0) ; RV32ZVE32F-NEXT: sw zero, 4(a0) @@ -2265,25 +2265,25 @@ ; ; RV64ZVE32F-LABEL: mgather_v2i32_zextload_v2i64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: beqz a3, .LBB30_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load ; RV64ZVE32F-NEXT: lw a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 ; RV64ZVE32F-NEXT: .LBB30_2: # %else ; RV64ZVE32F-NEXT: andi a0, a2, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB30_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 ; RV64ZVE32F-NEXT: lw a0, 0(a1) -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 ; RV64ZVE32F-NEXT: .LBB30_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a0, v9 ; RV64ZVE32F-NEXT: slli a0, a0, 32 @@ -2316,7 +2316,7 @@ ; ; RV64ZVE32F-LABEL: mgather_v4i32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: bnez a2, .LBB31_5 @@ -2334,34 +2334,34 @@ ; RV64ZVE32F-NEXT: .LBB31_5: # %cond.load ; RV64ZVE32F-NEXT: ld a2, 0(a0) ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a2 ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB31_2 ; RV64ZVE32F-NEXT: .LBB31_6: # %cond.load1 ; RV64ZVE32F-NEXT: ld a2, 8(a0) ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: beqz a2, .LBB31_3 ; RV64ZVE32F-NEXT: .LBB31_7: # %cond.load4 ; RV64ZVE32F-NEXT: ld a2, 16(a0) ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 4, 
e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 2 ; RV64ZVE32F-NEXT: andi a1, a1, 8 ; RV64ZVE32F-NEXT: beqz a1, .LBB31_4 ; RV64ZVE32F-NEXT: .LBB31_8: # %cond.load7 ; RV64ZVE32F-NEXT: ld a0, 24(a0) ; RV64ZVE32F-NEXT: lw a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 3 ; RV64ZVE32F-NEXT: ret %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 4, <4 x i1> %m, <4 x i32> %passthru) @@ -2371,20 +2371,20 @@ define <4 x i32> @mgather_truemask_v4i32(<4 x i32*> %ptrs, <4 x i32> %passthru) { ; RV32-LABEL: mgather_truemask_v4i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vluxei32.v v8, (zero), v8 ; RV32-NEXT: ret ; ; RV64V-LABEL: mgather_truemask_v4i32: ; RV64V: # %bb.0: -; RV64V-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV64V-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64V-NEXT: vluxei64.v v10, (zero), v8 ; RV64V-NEXT: vmv.v.v v8, v10 ; RV64V-NEXT: ret ; ; RV64ZVE32F-LABEL: mgather_truemask_v4i32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vmset.m v9 ; RV64ZVE32F-NEXT: vmv.x.s a1, v9 ; RV64ZVE32F-NEXT: beqz zero, .LBB32_5 @@ -2402,34 +2402,34 @@ ; RV64ZVE32F-NEXT: .LBB32_5: # %cond.load ; RV64ZVE32F-NEXT: ld a2, 0(a0) ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a2 ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB32_2 ; RV64ZVE32F-NEXT: .LBB32_6: # %cond.load1 ; RV64ZVE32F-NEXT: ld a2, 8(a0) ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: beqz a2, .LBB32_3 ; RV64ZVE32F-NEXT: .LBB32_7: # %cond.load4 ; RV64ZVE32F-NEXT: ld a2, 16(a0) ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 2 ; RV64ZVE32F-NEXT: andi a1, a1, 8 ; RV64ZVE32F-NEXT: beqz a1, .LBB32_4 ; RV64ZVE32F-NEXT: .LBB32_8: # %cond.load7 ; RV64ZVE32F-NEXT: ld a0, 24(a0) ; RV64ZVE32F-NEXT: lw a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 3 ; RV64ZVE32F-NEXT: ret %mhead = insertelement <4 x i1> poison, i1 1, i32 0 @@ -2475,7 +2475,7 @@ ; ; RV64ZVE32F-LABEL: mgather_v8i32: ; RV64ZVE32F: # %bb.0: -; 
RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: bnez a2, .LBB34_9 @@ -2505,70 +2505,70 @@ ; RV64ZVE32F-NEXT: .LBB34_9: # %cond.load ; RV64ZVE32F-NEXT: ld a2, 0(a0) ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a2 ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB34_2 ; RV64ZVE32F-NEXT: .LBB34_10: # %cond.load1 ; RV64ZVE32F-NEXT: ld a2, 8(a0) ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v10, 1 ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: beqz a2, .LBB34_3 ; RV64ZVE32F-NEXT: .LBB34_11: # %cond.load4 ; RV64ZVE32F-NEXT: ld a2, 16(a0) ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v10, 2 ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: beqz a2, .LBB34_4 ; RV64ZVE32F-NEXT: .LBB34_12: # %cond.load7 ; RV64ZVE32F-NEXT: ld a2, 24(a0) ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v10, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB34_5 ; RV64ZVE32F-NEXT: .LBB34_13: # %cond.load10 ; RV64ZVE32F-NEXT: ld a2, 32(a0) ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v10, 4 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB34_6 ; RV64ZVE32F-NEXT: .LBB34_14: # %cond.load13 ; RV64ZVE32F-NEXT: ld a2, 40(a0) ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v10, 5 ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: beqz a2, .LBB34_7 ; RV64ZVE32F-NEXT: .LBB34_15: # %cond.load16 ; RV64ZVE32F-NEXT: ld a2, 48(a0) ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v10, 6 ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB34_8 ; RV64ZVE32F-NEXT: .LBB34_16: # %cond.load19 ; RV64ZVE32F-NEXT: ld a0, 56(a0) ; RV64ZVE32F-NEXT: lw a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, 
m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v10, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v10, 7 ; RV64ZVE32F-NEXT: ret %v = call <8 x i32> @llvm.masked.gather.v8i32.v8p0i32(<8 x i32*> %ptrs, i32 4, <8 x i1> %m, <8 x i32> %passthru) @@ -2587,7 +2587,7 @@ ; ; RV64V-LABEL: mgather_baseidx_v8i8_v8i32: ; RV64V: # %bb.0: -; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64V-NEXT: vsext.vf8 v12, v8 ; RV64V-NEXT: vsll.vi v12, v12, 2 ; RV64V-NEXT: vsetvli zero, zero, e32, m2, ta, mu @@ -2597,7 +2597,7 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_v8i8_v8i32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB35_2 @@ -2606,24 +2606,24 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 ; RV64ZVE32F-NEXT: .LBB35_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB35_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 1 ; RV64ZVE32F-NEXT: .LBB35_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB35_6 @@ -2632,12 +2632,12 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2 ; RV64ZVE32F-NEXT: .LBB35_6: # %else5 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB35_13 @@ -2648,18 +2648,18 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB35_10 ; RV64ZVE32F-NEXT: .LBB35_9: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, mu 
+; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5 ; RV64ZVE32F-NEXT: .LBB35_10: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB35_15 @@ -2670,27 +2670,27 @@ ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB35_13: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB35_8 ; RV64ZVE32F-NEXT: .LBB35_14: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB35_9 @@ -2700,22 +2700,22 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB35_12 ; RV64ZVE32F-NEXT: .LBB35_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v8 ; RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 ; RV64ZVE32F-NEXT: lw a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret @@ -2736,7 +2736,7 @@ ; ; RV64V-LABEL: mgather_baseidx_sext_v8i8_v8i32: ; RV64V: # %bb.0: -; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64V-NEXT: vsext.vf8 v12, v8 ; RV64V-NEXT: vsll.vi v12, v12, 2 ; RV64V-NEXT: vsetvli zero, zero, e32, m2, ta, mu @@ -2746,7 +2746,7 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_sext_v8i8_v8i32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; 
RV64ZVE32F-NEXT: beqz a2, .LBB36_2 @@ -2755,24 +2755,24 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 ; RV64ZVE32F-NEXT: .LBB36_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB36_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 1 ; RV64ZVE32F-NEXT: .LBB36_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB36_6 @@ -2781,12 +2781,12 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2 ; RV64ZVE32F-NEXT: .LBB36_6: # %else5 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB36_13 @@ -2797,18 +2797,18 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB36_10 ; RV64ZVE32F-NEXT: .LBB36_9: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5 ; RV64ZVE32F-NEXT: .LBB36_10: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB36_15 @@ -2819,27 +2819,27 @@ ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB36_13: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, 
tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB36_8 ; RV64ZVE32F-NEXT: .LBB36_14: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB36_9 @@ -2849,22 +2849,22 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB36_12 ; RV64ZVE32F-NEXT: .LBB36_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v8 ; RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 ; RV64ZVE32F-NEXT: lw a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret @@ -2886,7 +2886,7 @@ ; ; RV64V-LABEL: mgather_baseidx_zext_v8i8_v8i32: ; RV64V: # %bb.0: -; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64V-NEXT: vzext.vf8 v12, v8 ; RV64V-NEXT: vsll.vi v12, v12, 2 ; RV64V-NEXT: vsetvli zero, zero, e32, m2, ta, mu @@ -2896,7 +2896,7 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_zext_v8i8_v8i32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB37_2 @@ -2906,25 +2906,25 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 ; RV64ZVE32F-NEXT: .LBB37_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB37_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, 
mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 1 ; RV64ZVE32F-NEXT: .LBB37_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB37_6 @@ -2934,12 +2934,12 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2 ; RV64ZVE32F-NEXT: .LBB37_6: # %else5 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB37_13 @@ -2950,19 +2950,19 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB37_10 ; RV64ZVE32F-NEXT: .LBB37_9: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5 ; RV64ZVE32F-NEXT: .LBB37_10: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB37_15 @@ -2973,29 +2973,29 @@ ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB37_13: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB37_8 ; RV64ZVE32F-NEXT: .LBB37_14: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, 
.LBB37_9 @@ -3006,23 +3006,23 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB37_12 ; RV64ZVE32F-NEXT: .LBB37_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v8 ; RV64ZVE32F-NEXT: andi a1, a1, 255 ; RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 ; RV64ZVE32F-NEXT: lw a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret @@ -3044,7 +3044,7 @@ ; ; RV64V-LABEL: mgather_baseidx_v8i16_v8i32: ; RV64V: # %bb.0: -; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64V-NEXT: vsext.vf4 v12, v8 ; RV64V-NEXT: vsll.vi v12, v12, 2 ; RV64V-NEXT: vsetvli zero, zero, e32, m2, ta, mu @@ -3054,34 +3054,34 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_v8i16_v8i32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB38_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 ; RV64ZVE32F-NEXT: .LBB38_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB38_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 1 ; RV64ZVE32F-NEXT: .LBB38_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB38_6 @@ -3090,12 +3090,12 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli 
zero, 3, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2 ; RV64ZVE32F-NEXT: .LBB38_6: # %else5 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB38_13 @@ -3106,18 +3106,18 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB38_10 ; RV64ZVE32F-NEXT: .LBB38_9: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5 ; RV64ZVE32F-NEXT: .LBB38_10: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB38_15 @@ -3128,27 +3128,27 @@ ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB38_13: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB38_8 ; RV64ZVE32F-NEXT: .LBB38_14: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB38_9 @@ -3158,22 +3158,22 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB38_12 ; RV64ZVE32F-NEXT: .LBB38_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v8 ; RV64ZVE32F-NEXT: slli 
a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 ; RV64ZVE32F-NEXT: lw a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret @@ -3194,7 +3194,7 @@ ; ; RV64V-LABEL: mgather_baseidx_sext_v8i16_v8i32: ; RV64V: # %bb.0: -; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64V-NEXT: vsext.vf4 v12, v8 ; RV64V-NEXT: vsll.vi v12, v12, 2 ; RV64V-NEXT: vsetvli zero, zero, e32, m2, ta, mu @@ -3204,34 +3204,34 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_sext_v8i16_v8i32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB39_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 ; RV64ZVE32F-NEXT: .LBB39_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB39_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 1 ; RV64ZVE32F-NEXT: .LBB39_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB39_6 @@ -3240,12 +3240,12 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2 ; RV64ZVE32F-NEXT: .LBB39_6: # %else5 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB39_13 @@ -3256,18 +3256,18 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB39_10 ; RV64ZVE32F-NEXT: .LBB39_9: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; 
RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5 ; RV64ZVE32F-NEXT: .LBB39_10: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB39_15 @@ -3278,27 +3278,27 @@ ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB39_13: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB39_8 ; RV64ZVE32F-NEXT: .LBB39_14: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB39_9 @@ -3308,22 +3308,22 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB39_12 ; RV64ZVE32F-NEXT: .LBB39_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v8 ; RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 ; RV64ZVE32F-NEXT: lw a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret @@ -3345,7 +3345,7 @@ ; ; RV64V-LABEL: mgather_baseidx_zext_v8i16_v8i32: ; RV64V: # %bb.0: -; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64V-NEXT: vzext.vf4 v12, v8 ; RV64V-NEXT: vsll.vi v12, v12, 2 ; RV64V-NEXT: vsetvli zero, zero, e32, m2, ta, mu @@ -3356,37 +3356,37 @@ ; RV64ZVE32F-LABEL: 
mgather_baseidx_zext_v8i16_v8i32: ; RV64ZVE32F: # %bb.0: ; RV64ZVE32F-NEXT: lui a1, 16 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: addiw a1, a1, -1 ; RV64ZVE32F-NEXT: beqz a3, .LBB40_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v8 ; RV64ZVE32F-NEXT: and a3, a3, a1 ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 ; RV64ZVE32F-NEXT: lw a3, 0(a3) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v10, a3 ; RV64ZVE32F-NEXT: .LBB40_2: # %else ; RV64ZVE32F-NEXT: andi a3, a2, 2 ; RV64ZVE32F-NEXT: beqz a3, .LBB40_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v9 ; RV64ZVE32F-NEXT: and a3, a3, a1 ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 ; RV64ZVE32F-NEXT: lw a3, 0(a3) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a3 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 1 ; RV64ZVE32F-NEXT: .LBB40_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a3, a2, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: beqz a3, .LBB40_6 @@ -3396,12 +3396,12 @@ ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 ; RV64ZVE32F-NEXT: lw a3, 0(a3) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a3 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2 ; RV64ZVE32F-NEXT: .LBB40_6: # %else5 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; RV64ZVE32F-NEXT: andi a3, a2, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a3, .LBB40_13 @@ -3412,19 +3412,19 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 32 ; RV64ZVE32F-NEXT: beqz a3, .LBB40_10 ; RV64ZVE32F-NEXT: .LBB40_9: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v9 ; RV64ZVE32F-NEXT: and a3, a3, a1 ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 ; RV64ZVE32F-NEXT: lw a3, 0(a3) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a3 -; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5 ; RV64ZVE32F-NEXT: .LBB40_10: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a3, a2, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a3, .LBB40_15 @@ -3435,29 +3435,29 @@ 
; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB40_13: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v9 ; RV64ZVE32F-NEXT: and a3, a3, a1 ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 ; RV64ZVE32F-NEXT: lw a3, 0(a3) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a3 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 3 ; RV64ZVE32F-NEXT: andi a3, a2, 16 ; RV64ZVE32F-NEXT: beqz a3, .LBB40_8 ; RV64ZVE32F-NEXT: .LBB40_14: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v8 ; RV64ZVE32F-NEXT: and a3, a3, a1 ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 ; RV64ZVE32F-NEXT: lw a3, 0(a3) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a3 -; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4 ; RV64ZVE32F-NEXT: andi a3, a2, 32 ; RV64ZVE32F-NEXT: bnez a3, .LBB40_9 @@ -3468,23 +3468,23 @@ ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 ; RV64ZVE32F-NEXT: lw a3, 0(a3) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a3 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 ; RV64ZVE32F-NEXT: andi a2, a2, -128 ; RV64ZVE32F-NEXT: beqz a2, .LBB40_12 ; RV64ZVE32F-NEXT: .LBB40_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: and a1, a2, a1 ; RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 ; RV64ZVE32F-NEXT: lw a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret @@ -3505,7 +3505,7 @@ ; ; RV64V-LABEL: mgather_baseidx_v8i32: ; RV64V: # %bb.0: -; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64V-NEXT: vsext.vf2 v12, v8 ; RV64V-NEXT: vsll.vi v12, v12, 2 ; RV64V-NEXT: vsetvli zero, zero, e32, m2, ta, mu @@ -3515,35 +3515,35 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_v8i32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB41_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; 
RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 ; RV64ZVE32F-NEXT: .LBB41_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB41_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v12 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 1 ; RV64ZVE32F-NEXT: .LBB41_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB41_12 @@ -3557,17 +3557,17 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB41_9 ; RV64ZVE32F-NEXT: .LBB41_8: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v12, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v8, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5 ; RV64ZVE32F-NEXT: .LBB41_9: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v12, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB41_15 @@ -3583,31 +3583,31 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 2 ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: beqz a2, .LBB41_6 ; RV64ZVE32F-NEXT: .LBB41_13: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v8, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB41_7 ; RV64ZVE32F-NEXT: .LBB41_14: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v12 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; 
RV64ZVE32F-NEXT: vslideup.vi v10, v8, 4 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB41_8 @@ -3618,19 +3618,19 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lw a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB41_11 ; RV64ZVE32F-NEXT: .LBB41_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v8 ; RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 ; RV64ZVE32F-NEXT: lw a0, 0(a0) ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret @@ -3658,14 +3658,14 @@ ; ; RV32ZVE32F-LABEL: mgather_v1i64: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetvli a2, zero, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetvli a2, zero, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.v.i v9, 0 ; RV32ZVE32F-NEXT: vmerge.vim v9, v9, 1, v0 ; RV32ZVE32F-NEXT: vmv.x.s a2, v9 ; RV32ZVE32F-NEXT: andi a2, a2, 1 ; RV32ZVE32F-NEXT: beqz a2, .LBB42_2 ; RV32ZVE32F-NEXT: # %bb.1: # %cond.load -; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a0, v8 ; RV32ZVE32F-NEXT: lw a1, 4(a0) ; RV32ZVE32F-NEXT: lw a0, 0(a0) @@ -3674,7 +3674,7 @@ ; ; RV64ZVE32F-LABEL: mgather_v1i64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetvli a2, zero, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetvli a2, zero, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.v.i v8, 0 ; RV64ZVE32F-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 @@ -3708,12 +3708,12 @@ ; ; RV32ZVE32F-LABEL: mgather_v2i64: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a4, v0 ; RV32ZVE32F-NEXT: andi a2, a4, 1 ; RV32ZVE32F-NEXT: beqz a2, .LBB43_3 ; RV32ZVE32F-NEXT: # %bb.1: # %cond.load -; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a3, v8 ; RV32ZVE32F-NEXT: lw a2, 4(a3) ; RV32ZVE32F-NEXT: lw a3, 0(a3) @@ -3729,7 +3729,7 @@ ; RV32ZVE32F-NEXT: andi a4, a4, 2 ; RV32ZVE32F-NEXT: beqz a4, .LBB43_2 ; RV32ZVE32F-NEXT: .LBB43_4: # %cond.load1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: lw a4, 4(a1) @@ -3743,7 +3743,7 @@ ; ; RV64ZVE32F-LABEL: mgather_v2i64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a4, v0 ; RV64ZVE32F-NEXT: andi a5, a4, 1 ; RV64ZVE32F-NEXT: beqz a5, .LBB43_2 @@ -3781,12 +3781,12 @@ ; ; RV32ZVE32F-LABEL: mgather_v4i64: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a6, v0 ; RV32ZVE32F-NEXT: andi a2, a6, 1 ; RV32ZVE32F-NEXT: beqz a2, .LBB44_5 ; RV32ZVE32F-NEXT: # %bb.1: # %cond.load -; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; 
RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a3, v8 ; RV32ZVE32F-NEXT: lw a2, 4(a3) ; RV32ZVE32F-NEXT: lw a3, 0(a3) @@ -3812,7 +3812,7 @@ ; RV32ZVE32F-NEXT: andi a4, a6, 2 ; RV32ZVE32F-NEXT: beqz a4, .LBB44_2 ; RV32ZVE32F-NEXT: .LBB44_6: # %cond.load1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a5, v9 ; RV32ZVE32F-NEXT: lw a4, 4(a5) @@ -3820,7 +3820,7 @@ ; RV32ZVE32F-NEXT: andi a7, a6, 4 ; RV32ZVE32F-NEXT: beqz a7, .LBB44_3 ; RV32ZVE32F-NEXT: .LBB44_7: # %cond.load4 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s t0, v9 ; RV32ZVE32F-NEXT: lw a7, 4(t0) @@ -3828,7 +3828,7 @@ ; RV32ZVE32F-NEXT: andi a6, a6, 8 ; RV32ZVE32F-NEXT: beqz a6, .LBB44_4 ; RV32ZVE32F-NEXT: .LBB44_8: # %cond.load7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: lw a6, 4(a1) @@ -3846,7 +3846,7 @@ ; ; RV64ZVE32F-LABEL: mgather_v4i64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a5, v0 ; RV64ZVE32F-NEXT: andi a3, a5, 1 ; RV64ZVE32F-NEXT: beqz a3, .LBB44_5 @@ -3896,25 +3896,25 @@ define <4 x i64> @mgather_truemask_v4i64(<4 x i64*> %ptrs, <4 x i64> %passthru) { ; RV32V-LABEL: mgather_truemask_v4i64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32V-NEXT: vluxei32.v v10, (zero), v8 ; RV32V-NEXT: vmv.v.v v8, v10 ; RV32V-NEXT: ret ; ; RV64V-LABEL: mgather_truemask_v4i64: ; RV64V: # %bb.0: -; RV64V-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64V-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64V-NEXT: vluxei64.v v8, (zero), v8 ; RV64V-NEXT: ret ; ; RV32ZVE32F-LABEL: mgather_truemask_v4i64: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV32ZVE32F-NEXT: vmset.m v9 ; RV32ZVE32F-NEXT: vmv.x.s a6, v9 ; RV32ZVE32F-NEXT: bnez zero, .LBB45_5 ; RV32ZVE32F-NEXT: # %bb.1: # %cond.load -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a3, v8 ; RV32ZVE32F-NEXT: lw a2, 4(a3) ; RV32ZVE32F-NEXT: lw a3, 0(a3) @@ -3940,7 +3940,7 @@ ; RV32ZVE32F-NEXT: andi a4, a6, 2 ; RV32ZVE32F-NEXT: beqz a4, .LBB45_2 ; RV32ZVE32F-NEXT: .LBB45_6: # %cond.load1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a5, v9 ; RV32ZVE32F-NEXT: lw a4, 4(a5) @@ -3948,7 +3948,7 @@ ; RV32ZVE32F-NEXT: andi a7, a6, 4 ; RV32ZVE32F-NEXT: beqz a7, .LBB45_3 ; RV32ZVE32F-NEXT: .LBB45_7: # %cond.load4 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s t0, v9 ; RV32ZVE32F-NEXT: lw a7, 4(t0) @@ -3956,7 +3956,7 @@ ; RV32ZVE32F-NEXT: andi a6, a6, 8 ; RV32ZVE32F-NEXT: beqz a6, .LBB45_4 ; RV32ZVE32F-NEXT: .LBB45_8: # %cond.load7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; 
RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: lw a6, 4(a1) @@ -3974,7 +3974,7 @@ ; ; RV64ZVE32F-LABEL: mgather_truemask_v4i64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vmset.m v8 ; RV64ZVE32F-NEXT: vmv.x.s a5, v8 ; RV64ZVE32F-NEXT: bnez zero, .LBB45_5 @@ -4094,12 +4094,12 @@ ; RV32ZVE32F-NEXT: sw s1, 8(sp) # 4-byte Folded Spill ; RV32ZVE32F-NEXT: .cfi_offset s0, -4 ; RV32ZVE32F-NEXT: .cfi_offset s1, -8 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s t0, v0 ; RV32ZVE32F-NEXT: andi a2, t0, 1 ; RV32ZVE32F-NEXT: beqz a2, .LBB47_9 ; RV32ZVE32F-NEXT: # %bb.1: # %cond.load -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a3, v8 ; RV32ZVE32F-NEXT: lw a2, 4(a3) ; RV32ZVE32F-NEXT: lw a3, 0(a3) @@ -4145,7 +4145,7 @@ ; RV32ZVE32F-NEXT: andi a4, t0, 2 ; RV32ZVE32F-NEXT: beqz a4, .LBB47_2 ; RV32ZVE32F-NEXT: .LBB47_10: # %cond.load1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a5, v10 ; RV32ZVE32F-NEXT: lw a4, 4(a5) @@ -4153,7 +4153,7 @@ ; RV32ZVE32F-NEXT: andi a6, t0, 4 ; RV32ZVE32F-NEXT: beqz a6, .LBB47_3 ; RV32ZVE32F-NEXT: .LBB47_11: # %cond.load4 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a7, v10 ; RV32ZVE32F-NEXT: lw a6, 4(a7) @@ -4161,7 +4161,7 @@ ; RV32ZVE32F-NEXT: andi t1, t0, 8 ; RV32ZVE32F-NEXT: beqz t1, .LBB47_4 ; RV32ZVE32F-NEXT: .LBB47_12: # %cond.load7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s t2, v10 ; RV32ZVE32F-NEXT: lw t1, 4(t2) @@ -4169,7 +4169,7 @@ ; RV32ZVE32F-NEXT: andi t3, t0, 16 ; RV32ZVE32F-NEXT: beqz t3, .LBB47_5 ; RV32ZVE32F-NEXT: .LBB47_13: # %cond.load10 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s t4, v10 ; RV32ZVE32F-NEXT: lw t3, 4(t4) @@ -4177,7 +4177,7 @@ ; RV32ZVE32F-NEXT: andi t5, t0, 32 ; RV32ZVE32F-NEXT: beqz t5, .LBB47_6 ; RV32ZVE32F-NEXT: .LBB47_14: # %cond.load13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s t6, v10 ; RV32ZVE32F-NEXT: lw t5, 4(t6) @@ -4185,7 +4185,7 @@ ; RV32ZVE32F-NEXT: andi s0, t0, 64 ; RV32ZVE32F-NEXT: beqz s0, .LBB47_7 ; RV32ZVE32F-NEXT: .LBB47_15: # %cond.load16 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s s1, v10 ; RV32ZVE32F-NEXT: lw s0, 4(s1) @@ -4193,7 +4193,7 @@ ; RV32ZVE32F-NEXT: andi t0, t0, -128 ; RV32ZVE32F-NEXT: beqz t0, .LBB47_8 ; RV32ZVE32F-NEXT: .LBB47_16: # %cond.load19 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: lw t0, 4(a1) @@ -4222,7 +4222,7 @@ ; ; 
RV64ZVE32F-LABEL: mgather_v8i64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a6, v0 ; RV64ZVE32F-NEXT: andi a3, a6, 1 ; RV64ZVE32F-NEXT: beqz a3, .LBB47_9 @@ -4312,7 +4312,7 @@ define <8 x i64> @mgather_baseidx_v8i8_v8i64(i64* %base, <8 x i8> %idxs, <8 x i1> %m, <8 x i64> %passthru) { ; RV32V-LABEL: mgather_baseidx_v8i8_v8i64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vsext.vf4 v10, v8 ; RV32V-NEXT: vsll.vi v8, v10, 3 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu @@ -4337,16 +4337,16 @@ ; RV32ZVE32F-NEXT: sw s1, 8(sp) # 4-byte Folded Spill ; RV32ZVE32F-NEXT: .cfi_offset s0, -4 ; RV32ZVE32F-NEXT: .cfi_offset s1, -8 -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vsext.vf4 v10, v8 ; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s t0, v0 ; RV32ZVE32F-NEXT: andi a1, t0, 1 ; RV32ZVE32F-NEXT: beqz a1, .LBB48_9 ; RV32ZVE32F-NEXT: # %bb.1: # %cond.load -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a3, v8 ; RV32ZVE32F-NEXT: lw a1, 4(a3) ; RV32ZVE32F-NEXT: lw a3, 0(a3) @@ -4392,7 +4392,7 @@ ; RV32ZVE32F-NEXT: andi a4, t0, 2 ; RV32ZVE32F-NEXT: beqz a4, .LBB48_2 ; RV32ZVE32F-NEXT: .LBB48_10: # %cond.load1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a5, v10 ; RV32ZVE32F-NEXT: lw a4, 4(a5) @@ -4400,7 +4400,7 @@ ; RV32ZVE32F-NEXT: andi a6, t0, 4 ; RV32ZVE32F-NEXT: beqz a6, .LBB48_3 ; RV32ZVE32F-NEXT: .LBB48_11: # %cond.load4 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a7, v10 ; RV32ZVE32F-NEXT: lw a6, 4(a7) @@ -4408,7 +4408,7 @@ ; RV32ZVE32F-NEXT: andi t1, t0, 8 ; RV32ZVE32F-NEXT: beqz t1, .LBB48_4 ; RV32ZVE32F-NEXT: .LBB48_12: # %cond.load7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s t2, v10 ; RV32ZVE32F-NEXT: lw t1, 4(t2) @@ -4416,7 +4416,7 @@ ; RV32ZVE32F-NEXT: andi t3, t0, 16 ; RV32ZVE32F-NEXT: beqz t3, .LBB48_5 ; RV32ZVE32F-NEXT: .LBB48_13: # %cond.load10 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s t4, v10 ; RV32ZVE32F-NEXT: lw t3, 4(t4) @@ -4424,7 +4424,7 @@ ; RV32ZVE32F-NEXT: andi t5, t0, 32 ; RV32ZVE32F-NEXT: beqz t5, .LBB48_6 ; RV32ZVE32F-NEXT: .LBB48_14: # %cond.load13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s t6, v10 ; RV32ZVE32F-NEXT: lw t5, 4(t6) @@ -4432,7 +4432,7 @@ ; RV32ZVE32F-NEXT: andi s0, t0, 64 ; RV32ZVE32F-NEXT: beqz s0, .LBB48_7 ; RV32ZVE32F-NEXT: .LBB48_15: # %cond.load16 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; 
RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s s1, v10 ; RV32ZVE32F-NEXT: lw s0, 4(s1) @@ -4440,7 +4440,7 @@ ; RV32ZVE32F-NEXT: andi t0, t0, -128 ; RV32ZVE32F-NEXT: beqz t0, .LBB48_8 ; RV32ZVE32F-NEXT: .LBB48_16: # %cond.load19 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a2, v8 ; RV32ZVE32F-NEXT: lw t0, 4(a2) @@ -4469,7 +4469,7 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_v8i8_v8i64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a6, v0 ; RV64ZVE32F-NEXT: andi a3, a6, 1 ; RV64ZVE32F-NEXT: beqz a3, .LBB48_3 @@ -4488,14 +4488,14 @@ ; RV64ZVE32F-NEXT: andi a4, a6, 2 ; RV64ZVE32F-NEXT: beqz a4, .LBB48_2 ; RV64ZVE32F-NEXT: .LBB48_4: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a4, v9 ; RV64ZVE32F-NEXT: slli a4, a4, 3 ; RV64ZVE32F-NEXT: add a4, a1, a4 ; RV64ZVE32F-NEXT: ld a4, 0(a4) ; RV64ZVE32F-NEXT: .LBB48_5: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a5, a6, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: beqz a5, .LBB48_7 @@ -4508,12 +4508,12 @@ ; RV64ZVE32F-NEXT: .LBB48_7: ; RV64ZVE32F-NEXT: ld a5, 16(a2) ; RV64ZVE32F-NEXT: .LBB48_8: # %else5 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a7, a6, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: beqz a7, .LBB48_12 ; RV64ZVE32F-NEXT: # %bb.9: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a7, v9 ; RV64ZVE32F-NEXT: slli a7, a7, 3 @@ -4533,7 +4533,7 @@ ; RV64ZVE32F-NEXT: andi t0, a6, 16 ; RV64ZVE32F-NEXT: beqz t0, .LBB48_10 ; RV64ZVE32F-NEXT: .LBB48_13: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s t0, v8 ; RV64ZVE32F-NEXT: slli t0, t0, 3 ; RV64ZVE32F-NEXT: add t0, a1, t0 @@ -4541,14 +4541,14 @@ ; RV64ZVE32F-NEXT: andi t1, a6, 32 ; RV64ZVE32F-NEXT: beqz t1, .LBB48_11 ; RV64ZVE32F-NEXT: .LBB48_14: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s t1, v9 ; RV64ZVE32F-NEXT: slli t1, t1, 3 ; RV64ZVE32F-NEXT: add t1, a1, t1 ; RV64ZVE32F-NEXT: ld t1, 0(t1) ; RV64ZVE32F-NEXT: .LBB48_15: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi t2, a6, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: beqz t2, .LBB48_18 @@ -4567,7 +4567,7 @@ ; RV64ZVE32F-NEXT: andi a6, a6, -128 ; RV64ZVE32F-NEXT: beqz a6, .LBB48_17 ; RV64ZVE32F-NEXT: .LBB48_19: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 3 @@ -4591,7 +4591,7 @@ define <8 x i64> @mgather_baseidx_sext_v8i8_v8i64(i64* %base, <8 
x i8> %idxs, <8 x i1> %m, <8 x i64> %passthru) { ; RV32V-LABEL: mgather_baseidx_sext_v8i8_v8i64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vsext.vf4 v10, v8 ; RV32V-NEXT: vsll.vi v8, v10, 3 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu @@ -4616,16 +4616,16 @@ ; RV32ZVE32F-NEXT: sw s1, 8(sp) # 4-byte Folded Spill ; RV32ZVE32F-NEXT: .cfi_offset s0, -4 ; RV32ZVE32F-NEXT: .cfi_offset s1, -8 -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vsext.vf4 v10, v8 ; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s t0, v0 ; RV32ZVE32F-NEXT: andi a1, t0, 1 ; RV32ZVE32F-NEXT: beqz a1, .LBB49_9 ; RV32ZVE32F-NEXT: # %bb.1: # %cond.load -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a3, v8 ; RV32ZVE32F-NEXT: lw a1, 4(a3) ; RV32ZVE32F-NEXT: lw a3, 0(a3) @@ -4671,7 +4671,7 @@ ; RV32ZVE32F-NEXT: andi a4, t0, 2 ; RV32ZVE32F-NEXT: beqz a4, .LBB49_2 ; RV32ZVE32F-NEXT: .LBB49_10: # %cond.load1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a5, v10 ; RV32ZVE32F-NEXT: lw a4, 4(a5) @@ -4679,7 +4679,7 @@ ; RV32ZVE32F-NEXT: andi a6, t0, 4 ; RV32ZVE32F-NEXT: beqz a6, .LBB49_3 ; RV32ZVE32F-NEXT: .LBB49_11: # %cond.load4 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a7, v10 ; RV32ZVE32F-NEXT: lw a6, 4(a7) @@ -4687,7 +4687,7 @@ ; RV32ZVE32F-NEXT: andi t1, t0, 8 ; RV32ZVE32F-NEXT: beqz t1, .LBB49_4 ; RV32ZVE32F-NEXT: .LBB49_12: # %cond.load7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s t2, v10 ; RV32ZVE32F-NEXT: lw t1, 4(t2) @@ -4695,7 +4695,7 @@ ; RV32ZVE32F-NEXT: andi t3, t0, 16 ; RV32ZVE32F-NEXT: beqz t3, .LBB49_5 ; RV32ZVE32F-NEXT: .LBB49_13: # %cond.load10 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s t4, v10 ; RV32ZVE32F-NEXT: lw t3, 4(t4) @@ -4703,7 +4703,7 @@ ; RV32ZVE32F-NEXT: andi t5, t0, 32 ; RV32ZVE32F-NEXT: beqz t5, .LBB49_6 ; RV32ZVE32F-NEXT: .LBB49_14: # %cond.load13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s t6, v10 ; RV32ZVE32F-NEXT: lw t5, 4(t6) @@ -4711,7 +4711,7 @@ ; RV32ZVE32F-NEXT: andi s0, t0, 64 ; RV32ZVE32F-NEXT: beqz s0, .LBB49_7 ; RV32ZVE32F-NEXT: .LBB49_15: # %cond.load16 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s s1, v10 ; RV32ZVE32F-NEXT: lw s0, 4(s1) @@ -4719,7 +4719,7 @@ ; RV32ZVE32F-NEXT: andi t0, t0, -128 ; RV32ZVE32F-NEXT: beqz t0, .LBB49_8 ; RV32ZVE32F-NEXT: .LBB49_16: # %cond.load19 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma 
; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a2, v8 ; RV32ZVE32F-NEXT: lw t0, 4(a2) @@ -4748,7 +4748,7 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_sext_v8i8_v8i64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a6, v0 ; RV64ZVE32F-NEXT: andi a3, a6, 1 ; RV64ZVE32F-NEXT: beqz a3, .LBB49_3 @@ -4767,14 +4767,14 @@ ; RV64ZVE32F-NEXT: andi a4, a6, 2 ; RV64ZVE32F-NEXT: beqz a4, .LBB49_2 ; RV64ZVE32F-NEXT: .LBB49_4: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a4, v9 ; RV64ZVE32F-NEXT: slli a4, a4, 3 ; RV64ZVE32F-NEXT: add a4, a1, a4 ; RV64ZVE32F-NEXT: ld a4, 0(a4) ; RV64ZVE32F-NEXT: .LBB49_5: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a5, a6, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: beqz a5, .LBB49_7 @@ -4787,12 +4787,12 @@ ; RV64ZVE32F-NEXT: .LBB49_7: ; RV64ZVE32F-NEXT: ld a5, 16(a2) ; RV64ZVE32F-NEXT: .LBB49_8: # %else5 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a7, a6, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: beqz a7, .LBB49_12 ; RV64ZVE32F-NEXT: # %bb.9: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a7, v9 ; RV64ZVE32F-NEXT: slli a7, a7, 3 @@ -4812,7 +4812,7 @@ ; RV64ZVE32F-NEXT: andi t0, a6, 16 ; RV64ZVE32F-NEXT: beqz t0, .LBB49_10 ; RV64ZVE32F-NEXT: .LBB49_13: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s t0, v8 ; RV64ZVE32F-NEXT: slli t0, t0, 3 ; RV64ZVE32F-NEXT: add t0, a1, t0 @@ -4820,14 +4820,14 @@ ; RV64ZVE32F-NEXT: andi t1, a6, 32 ; RV64ZVE32F-NEXT: beqz t1, .LBB49_11 ; RV64ZVE32F-NEXT: .LBB49_14: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s t1, v9 ; RV64ZVE32F-NEXT: slli t1, t1, 3 ; RV64ZVE32F-NEXT: add t1, a1, t1 ; RV64ZVE32F-NEXT: ld t1, 0(t1) ; RV64ZVE32F-NEXT: .LBB49_15: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi t2, a6, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: beqz t2, .LBB49_18 @@ -4846,7 +4846,7 @@ ; RV64ZVE32F-NEXT: andi a6, a6, -128 ; RV64ZVE32F-NEXT: beqz a6, .LBB49_17 ; RV64ZVE32F-NEXT: .LBB49_19: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 3 @@ -4871,7 +4871,7 @@ define <8 x i64> @mgather_baseidx_zext_v8i8_v8i64(i64* %base, <8 x i8> %idxs, <8 x i1> %m, <8 x i64> %passthru) { ; RV32V-LABEL: mgather_baseidx_zext_v8i8_v8i64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vzext.vf4 v10, v8 ; RV32V-NEXT: vsll.vi v8, v10, 3 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu @@ -4896,16 +4896,16 @@ 
; RV32ZVE32F-NEXT: sw s1, 8(sp) # 4-byte Folded Spill ; RV32ZVE32F-NEXT: .cfi_offset s0, -4 ; RV32ZVE32F-NEXT: .cfi_offset s1, -8 -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vzext.vf4 v10, v8 ; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s t0, v0 ; RV32ZVE32F-NEXT: andi a1, t0, 1 ; RV32ZVE32F-NEXT: beqz a1, .LBB50_9 ; RV32ZVE32F-NEXT: # %bb.1: # %cond.load -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a3, v8 ; RV32ZVE32F-NEXT: lw a1, 4(a3) ; RV32ZVE32F-NEXT: lw a3, 0(a3) @@ -4951,7 +4951,7 @@ ; RV32ZVE32F-NEXT: andi a4, t0, 2 ; RV32ZVE32F-NEXT: beqz a4, .LBB50_2 ; RV32ZVE32F-NEXT: .LBB50_10: # %cond.load1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a5, v10 ; RV32ZVE32F-NEXT: lw a4, 4(a5) @@ -4959,7 +4959,7 @@ ; RV32ZVE32F-NEXT: andi a6, t0, 4 ; RV32ZVE32F-NEXT: beqz a6, .LBB50_3 ; RV32ZVE32F-NEXT: .LBB50_11: # %cond.load4 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a7, v10 ; RV32ZVE32F-NEXT: lw a6, 4(a7) @@ -4967,7 +4967,7 @@ ; RV32ZVE32F-NEXT: andi t1, t0, 8 ; RV32ZVE32F-NEXT: beqz t1, .LBB50_4 ; RV32ZVE32F-NEXT: .LBB50_12: # %cond.load7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s t2, v10 ; RV32ZVE32F-NEXT: lw t1, 4(t2) @@ -4975,7 +4975,7 @@ ; RV32ZVE32F-NEXT: andi t3, t0, 16 ; RV32ZVE32F-NEXT: beqz t3, .LBB50_5 ; RV32ZVE32F-NEXT: .LBB50_13: # %cond.load10 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s t4, v10 ; RV32ZVE32F-NEXT: lw t3, 4(t4) @@ -4983,7 +4983,7 @@ ; RV32ZVE32F-NEXT: andi t5, t0, 32 ; RV32ZVE32F-NEXT: beqz t5, .LBB50_6 ; RV32ZVE32F-NEXT: .LBB50_14: # %cond.load13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s t6, v10 ; RV32ZVE32F-NEXT: lw t5, 4(t6) @@ -4991,7 +4991,7 @@ ; RV32ZVE32F-NEXT: andi s0, t0, 64 ; RV32ZVE32F-NEXT: beqz s0, .LBB50_7 ; RV32ZVE32F-NEXT: .LBB50_15: # %cond.load16 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s s1, v10 ; RV32ZVE32F-NEXT: lw s0, 4(s1) @@ -4999,7 +4999,7 @@ ; RV32ZVE32F-NEXT: andi t0, t0, -128 ; RV32ZVE32F-NEXT: beqz t0, .LBB50_8 ; RV32ZVE32F-NEXT: .LBB50_16: # %cond.load19 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a2, v8 ; RV32ZVE32F-NEXT: lw t0, 4(a2) @@ -5028,7 +5028,7 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_zext_v8i8_v8i64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a5, v0 ; 
RV64ZVE32F-NEXT: andi a3, a5, 1 ; RV64ZVE32F-NEXT: beqz a3, .LBB50_3 @@ -5048,7 +5048,7 @@ ; RV64ZVE32F-NEXT: andi a4, a5, 2 ; RV64ZVE32F-NEXT: beqz a4, .LBB50_2 ; RV64ZVE32F-NEXT: .LBB50_4: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a4, v9 ; RV64ZVE32F-NEXT: andi a4, a4, 255 @@ -5056,7 +5056,7 @@ ; RV64ZVE32F-NEXT: add a4, a1, a4 ; RV64ZVE32F-NEXT: ld a4, 0(a4) ; RV64ZVE32F-NEXT: .LBB50_5: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a6, a5, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: beqz a6, .LBB50_7 @@ -5070,12 +5070,12 @@ ; RV64ZVE32F-NEXT: .LBB50_7: ; RV64ZVE32F-NEXT: ld a6, 16(a2) ; RV64ZVE32F-NEXT: .LBB50_8: # %else5 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a7, a5, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: beqz a7, .LBB50_12 ; RV64ZVE32F-NEXT: # %bb.9: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a7, v9 ; RV64ZVE32F-NEXT: andi a7, a7, 255 @@ -5096,7 +5096,7 @@ ; RV64ZVE32F-NEXT: andi t0, a5, 16 ; RV64ZVE32F-NEXT: beqz t0, .LBB50_10 ; RV64ZVE32F-NEXT: .LBB50_13: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s t0, v8 ; RV64ZVE32F-NEXT: andi t0, t0, 255 ; RV64ZVE32F-NEXT: slli t0, t0, 3 @@ -5105,7 +5105,7 @@ ; RV64ZVE32F-NEXT: andi t1, a5, 32 ; RV64ZVE32F-NEXT: beqz t1, .LBB50_11 ; RV64ZVE32F-NEXT: .LBB50_14: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s t1, v9 ; RV64ZVE32F-NEXT: andi t1, t1, 255 @@ -5113,7 +5113,7 @@ ; RV64ZVE32F-NEXT: add t1, a1, t1 ; RV64ZVE32F-NEXT: ld t1, 0(t1) ; RV64ZVE32F-NEXT: .LBB50_15: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi t2, a5, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: beqz t2, .LBB50_18 @@ -5133,7 +5133,7 @@ ; RV64ZVE32F-NEXT: andi a5, a5, -128 ; RV64ZVE32F-NEXT: beqz a5, .LBB50_17 ; RV64ZVE32F-NEXT: .LBB50_19: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: andi a2, a2, 255 @@ -5159,7 +5159,7 @@ define <8 x i64> @mgather_baseidx_v8i16_v8i64(i64* %base, <8 x i16> %idxs, <8 x i1> %m, <8 x i64> %passthru) { ; RV32V-LABEL: mgather_baseidx_v8i16_v8i64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vsext.vf2 v10, v8 ; RV32V-NEXT: vsll.vi v8, v10, 3 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu @@ -5184,16 +5184,16 @@ ; RV32ZVE32F-NEXT: sw s1, 8(sp) # 4-byte Folded Spill ; RV32ZVE32F-NEXT: .cfi_offset s0, -4 ; RV32ZVE32F-NEXT: .cfi_offset s1, -8 -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vsext.vf2 v10, v8 ; RV32ZVE32F-NEXT: vsll.vi v8, v10, 
3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s t0, v0 ; RV32ZVE32F-NEXT: andi a1, t0, 1 ; RV32ZVE32F-NEXT: beqz a1, .LBB51_9 ; RV32ZVE32F-NEXT: # %bb.1: # %cond.load -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a3, v8 ; RV32ZVE32F-NEXT: lw a1, 4(a3) ; RV32ZVE32F-NEXT: lw a3, 0(a3) @@ -5239,7 +5239,7 @@ ; RV32ZVE32F-NEXT: andi a4, t0, 2 ; RV32ZVE32F-NEXT: beqz a4, .LBB51_2 ; RV32ZVE32F-NEXT: .LBB51_10: # %cond.load1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a5, v10 ; RV32ZVE32F-NEXT: lw a4, 4(a5) @@ -5247,7 +5247,7 @@ ; RV32ZVE32F-NEXT: andi a6, t0, 4 ; RV32ZVE32F-NEXT: beqz a6, .LBB51_3 ; RV32ZVE32F-NEXT: .LBB51_11: # %cond.load4 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a7, v10 ; RV32ZVE32F-NEXT: lw a6, 4(a7) @@ -5255,7 +5255,7 @@ ; RV32ZVE32F-NEXT: andi t1, t0, 8 ; RV32ZVE32F-NEXT: beqz t1, .LBB51_4 ; RV32ZVE32F-NEXT: .LBB51_12: # %cond.load7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s t2, v10 ; RV32ZVE32F-NEXT: lw t1, 4(t2) @@ -5263,7 +5263,7 @@ ; RV32ZVE32F-NEXT: andi t3, t0, 16 ; RV32ZVE32F-NEXT: beqz t3, .LBB51_5 ; RV32ZVE32F-NEXT: .LBB51_13: # %cond.load10 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s t4, v10 ; RV32ZVE32F-NEXT: lw t3, 4(t4) @@ -5271,7 +5271,7 @@ ; RV32ZVE32F-NEXT: andi t5, t0, 32 ; RV32ZVE32F-NEXT: beqz t5, .LBB51_6 ; RV32ZVE32F-NEXT: .LBB51_14: # %cond.load13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s t6, v10 ; RV32ZVE32F-NEXT: lw t5, 4(t6) @@ -5279,7 +5279,7 @@ ; RV32ZVE32F-NEXT: andi s0, t0, 64 ; RV32ZVE32F-NEXT: beqz s0, .LBB51_7 ; RV32ZVE32F-NEXT: .LBB51_15: # %cond.load16 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s s1, v10 ; RV32ZVE32F-NEXT: lw s0, 4(s1) @@ -5287,7 +5287,7 @@ ; RV32ZVE32F-NEXT: andi t0, t0, -128 ; RV32ZVE32F-NEXT: beqz t0, .LBB51_8 ; RV32ZVE32F-NEXT: .LBB51_16: # %cond.load19 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a2, v8 ; RV32ZVE32F-NEXT: lw t0, 4(a2) @@ -5316,12 +5316,12 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_v8i16_v8i64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a6, v0 ; RV64ZVE32F-NEXT: andi a3, a6, 1 ; RV64ZVE32F-NEXT: beqz a3, .LBB51_3 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v8 ; RV64ZVE32F-NEXT: slli a3, a3, 3 ; RV64ZVE32F-NEXT: 
add a3, a1, a3 @@ -5336,14 +5336,14 @@ ; RV64ZVE32F-NEXT: andi a4, a6, 2 ; RV64ZVE32F-NEXT: beqz a4, .LBB51_2 ; RV64ZVE32F-NEXT: .LBB51_4: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a4, v9 ; RV64ZVE32F-NEXT: slli a4, a4, 3 ; RV64ZVE32F-NEXT: add a4, a1, a4 ; RV64ZVE32F-NEXT: ld a4, 0(a4) ; RV64ZVE32F-NEXT: .LBB51_5: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a5, a6, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: beqz a5, .LBB51_7 @@ -5356,12 +5356,12 @@ ; RV64ZVE32F-NEXT: .LBB51_7: ; RV64ZVE32F-NEXT: ld a5, 16(a2) ; RV64ZVE32F-NEXT: .LBB51_8: # %else5 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; RV64ZVE32F-NEXT: andi a7, a6, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: beqz a7, .LBB51_12 ; RV64ZVE32F-NEXT: # %bb.9: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a7, v9 ; RV64ZVE32F-NEXT: slli a7, a7, 3 @@ -5381,7 +5381,7 @@ ; RV64ZVE32F-NEXT: andi t0, a6, 16 ; RV64ZVE32F-NEXT: beqz t0, .LBB51_10 ; RV64ZVE32F-NEXT: .LBB51_13: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s t0, v8 ; RV64ZVE32F-NEXT: slli t0, t0, 3 ; RV64ZVE32F-NEXT: add t0, a1, t0 @@ -5389,14 +5389,14 @@ ; RV64ZVE32F-NEXT: andi t1, a6, 32 ; RV64ZVE32F-NEXT: beqz t1, .LBB51_11 ; RV64ZVE32F-NEXT: .LBB51_14: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s t1, v9 ; RV64ZVE32F-NEXT: slli t1, t1, 3 ; RV64ZVE32F-NEXT: add t1, a1, t1 ; RV64ZVE32F-NEXT: ld t1, 0(t1) ; RV64ZVE32F-NEXT: .LBB51_15: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi t2, a6, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: beqz t2, .LBB51_18 @@ -5415,7 +5415,7 @@ ; RV64ZVE32F-NEXT: andi a6, a6, -128 ; RV64ZVE32F-NEXT: beqz a6, .LBB51_17 ; RV64ZVE32F-NEXT: .LBB51_19: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 3 @@ -5439,7 +5439,7 @@ define <8 x i64> @mgather_baseidx_sext_v8i16_v8i64(i64* %base, <8 x i16> %idxs, <8 x i1> %m, <8 x i64> %passthru) { ; RV32V-LABEL: mgather_baseidx_sext_v8i16_v8i64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vsext.vf2 v10, v8 ; RV32V-NEXT: vsll.vi v8, v10, 3 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu @@ -5464,16 +5464,16 @@ ; RV32ZVE32F-NEXT: sw s1, 8(sp) # 4-byte Folded Spill ; RV32ZVE32F-NEXT: .cfi_offset s0, -4 ; RV32ZVE32F-NEXT: .cfi_offset s1, -8 -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vsext.vf2 v10, v8 ; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1 -; RV32ZVE32F-NEXT: vsetivli zero, 0, 
e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s t0, v0 ; RV32ZVE32F-NEXT: andi a1, t0, 1 ; RV32ZVE32F-NEXT: beqz a1, .LBB52_9 ; RV32ZVE32F-NEXT: # %bb.1: # %cond.load -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a3, v8 ; RV32ZVE32F-NEXT: lw a1, 4(a3) ; RV32ZVE32F-NEXT: lw a3, 0(a3) @@ -5519,7 +5519,7 @@ ; RV32ZVE32F-NEXT: andi a4, t0, 2 ; RV32ZVE32F-NEXT: beqz a4, .LBB52_2 ; RV32ZVE32F-NEXT: .LBB52_10: # %cond.load1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a5, v10 ; RV32ZVE32F-NEXT: lw a4, 4(a5) @@ -5527,7 +5527,7 @@ ; RV32ZVE32F-NEXT: andi a6, t0, 4 ; RV32ZVE32F-NEXT: beqz a6, .LBB52_3 ; RV32ZVE32F-NEXT: .LBB52_11: # %cond.load4 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a7, v10 ; RV32ZVE32F-NEXT: lw a6, 4(a7) @@ -5535,7 +5535,7 @@ ; RV32ZVE32F-NEXT: andi t1, t0, 8 ; RV32ZVE32F-NEXT: beqz t1, .LBB52_4 ; RV32ZVE32F-NEXT: .LBB52_12: # %cond.load7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s t2, v10 ; RV32ZVE32F-NEXT: lw t1, 4(t2) @@ -5543,7 +5543,7 @@ ; RV32ZVE32F-NEXT: andi t3, t0, 16 ; RV32ZVE32F-NEXT: beqz t3, .LBB52_5 ; RV32ZVE32F-NEXT: .LBB52_13: # %cond.load10 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s t4, v10 ; RV32ZVE32F-NEXT: lw t3, 4(t4) @@ -5551,7 +5551,7 @@ ; RV32ZVE32F-NEXT: andi t5, t0, 32 ; RV32ZVE32F-NEXT: beqz t5, .LBB52_6 ; RV32ZVE32F-NEXT: .LBB52_14: # %cond.load13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s t6, v10 ; RV32ZVE32F-NEXT: lw t5, 4(t6) @@ -5559,7 +5559,7 @@ ; RV32ZVE32F-NEXT: andi s0, t0, 64 ; RV32ZVE32F-NEXT: beqz s0, .LBB52_7 ; RV32ZVE32F-NEXT: .LBB52_15: # %cond.load16 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s s1, v10 ; RV32ZVE32F-NEXT: lw s0, 4(s1) @@ -5567,7 +5567,7 @@ ; RV32ZVE32F-NEXT: andi t0, t0, -128 ; RV32ZVE32F-NEXT: beqz t0, .LBB52_8 ; RV32ZVE32F-NEXT: .LBB52_16: # %cond.load19 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a2, v8 ; RV32ZVE32F-NEXT: lw t0, 4(a2) @@ -5596,12 +5596,12 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_sext_v8i16_v8i64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a6, v0 ; RV64ZVE32F-NEXT: andi a3, a6, 1 ; RV64ZVE32F-NEXT: beqz a3, .LBB52_3 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v8 ; RV64ZVE32F-NEXT: slli a3, a3, 3 ; RV64ZVE32F-NEXT: add a3, a1, a3 @@ -5616,14 +5616,14 @@ ; RV64ZVE32F-NEXT: andi a4, a6, 2 ; 
RV64ZVE32F-NEXT: beqz a4, .LBB52_2 ; RV64ZVE32F-NEXT: .LBB52_4: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a4, v9 ; RV64ZVE32F-NEXT: slli a4, a4, 3 ; RV64ZVE32F-NEXT: add a4, a1, a4 ; RV64ZVE32F-NEXT: ld a4, 0(a4) ; RV64ZVE32F-NEXT: .LBB52_5: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a5, a6, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: beqz a5, .LBB52_7 @@ -5636,12 +5636,12 @@ ; RV64ZVE32F-NEXT: .LBB52_7: ; RV64ZVE32F-NEXT: ld a5, 16(a2) ; RV64ZVE32F-NEXT: .LBB52_8: # %else5 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; RV64ZVE32F-NEXT: andi a7, a6, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: beqz a7, .LBB52_12 ; RV64ZVE32F-NEXT: # %bb.9: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a7, v9 ; RV64ZVE32F-NEXT: slli a7, a7, 3 @@ -5661,7 +5661,7 @@ ; RV64ZVE32F-NEXT: andi t0, a6, 16 ; RV64ZVE32F-NEXT: beqz t0, .LBB52_10 ; RV64ZVE32F-NEXT: .LBB52_13: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s t0, v8 ; RV64ZVE32F-NEXT: slli t0, t0, 3 ; RV64ZVE32F-NEXT: add t0, a1, t0 @@ -5669,14 +5669,14 @@ ; RV64ZVE32F-NEXT: andi t1, a6, 32 ; RV64ZVE32F-NEXT: beqz t1, .LBB52_11 ; RV64ZVE32F-NEXT: .LBB52_14: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s t1, v9 ; RV64ZVE32F-NEXT: slli t1, t1, 3 ; RV64ZVE32F-NEXT: add t1, a1, t1 ; RV64ZVE32F-NEXT: ld t1, 0(t1) ; RV64ZVE32F-NEXT: .LBB52_15: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi t2, a6, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: beqz t2, .LBB52_18 @@ -5695,7 +5695,7 @@ ; RV64ZVE32F-NEXT: andi a6, a6, -128 ; RV64ZVE32F-NEXT: beqz a6, .LBB52_17 ; RV64ZVE32F-NEXT: .LBB52_19: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 3 @@ -5720,7 +5720,7 @@ define <8 x i64> @mgather_baseidx_zext_v8i16_v8i64(i64* %base, <8 x i16> %idxs, <8 x i1> %m, <8 x i64> %passthru) { ; RV32V-LABEL: mgather_baseidx_zext_v8i16_v8i64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vzext.vf2 v10, v8 ; RV32V-NEXT: vsll.vi v8, v10, 3 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu @@ -5745,16 +5745,16 @@ ; RV32ZVE32F-NEXT: sw s1, 8(sp) # 4-byte Folded Spill ; RV32ZVE32F-NEXT: .cfi_offset s0, -4 ; RV32ZVE32F-NEXT: .cfi_offset s1, -8 -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vzext.vf2 v10, v8 ; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; 
RV32ZVE32F-NEXT: vmv.x.s t0, v0 ; RV32ZVE32F-NEXT: andi a1, t0, 1 ; RV32ZVE32F-NEXT: beqz a1, .LBB53_9 ; RV32ZVE32F-NEXT: # %bb.1: # %cond.load -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a3, v8 ; RV32ZVE32F-NEXT: lw a1, 4(a3) ; RV32ZVE32F-NEXT: lw a3, 0(a3) @@ -5800,7 +5800,7 @@ ; RV32ZVE32F-NEXT: andi a4, t0, 2 ; RV32ZVE32F-NEXT: beqz a4, .LBB53_2 ; RV32ZVE32F-NEXT: .LBB53_10: # %cond.load1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a5, v10 ; RV32ZVE32F-NEXT: lw a4, 4(a5) @@ -5808,7 +5808,7 @@ ; RV32ZVE32F-NEXT: andi a6, t0, 4 ; RV32ZVE32F-NEXT: beqz a6, .LBB53_3 ; RV32ZVE32F-NEXT: .LBB53_11: # %cond.load4 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a7, v10 ; RV32ZVE32F-NEXT: lw a6, 4(a7) @@ -5816,7 +5816,7 @@ ; RV32ZVE32F-NEXT: andi t1, t0, 8 ; RV32ZVE32F-NEXT: beqz t1, .LBB53_4 ; RV32ZVE32F-NEXT: .LBB53_12: # %cond.load7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s t2, v10 ; RV32ZVE32F-NEXT: lw t1, 4(t2) @@ -5824,7 +5824,7 @@ ; RV32ZVE32F-NEXT: andi t3, t0, 16 ; RV32ZVE32F-NEXT: beqz t3, .LBB53_5 ; RV32ZVE32F-NEXT: .LBB53_13: # %cond.load10 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s t4, v10 ; RV32ZVE32F-NEXT: lw t3, 4(t4) @@ -5832,7 +5832,7 @@ ; RV32ZVE32F-NEXT: andi t5, t0, 32 ; RV32ZVE32F-NEXT: beqz t5, .LBB53_6 ; RV32ZVE32F-NEXT: .LBB53_14: # %cond.load13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s t6, v10 ; RV32ZVE32F-NEXT: lw t5, 4(t6) @@ -5840,7 +5840,7 @@ ; RV32ZVE32F-NEXT: andi s0, t0, 64 ; RV32ZVE32F-NEXT: beqz s0, .LBB53_7 ; RV32ZVE32F-NEXT: .LBB53_15: # %cond.load16 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s s1, v10 ; RV32ZVE32F-NEXT: lw s0, 4(s1) @@ -5848,7 +5848,7 @@ ; RV32ZVE32F-NEXT: andi t0, t0, -128 ; RV32ZVE32F-NEXT: beqz t0, .LBB53_8 ; RV32ZVE32F-NEXT: .LBB53_16: # %cond.load19 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a2, v8 ; RV32ZVE32F-NEXT: lw t0, 4(a2) @@ -5878,13 +5878,13 @@ ; RV64ZVE32F-LABEL: mgather_baseidx_zext_v8i16_v8i64: ; RV64ZVE32F: # %bb.0: ; RV64ZVE32F-NEXT: lui a3, 16 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a6, v0 ; RV64ZVE32F-NEXT: andi a4, a6, 1 ; RV64ZVE32F-NEXT: addiw a5, a3, -1 ; RV64ZVE32F-NEXT: beqz a4, .LBB53_3 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v8 ; RV64ZVE32F-NEXT: and a3, a3, a5 ; RV64ZVE32F-NEXT: slli a3, a3, 3 @@ -5900,7 +5900,7 @@ ; RV64ZVE32F-NEXT: andi a4, a6, 2 ; 
RV64ZVE32F-NEXT: beqz a4, .LBB53_2 ; RV64ZVE32F-NEXT: .LBB53_4: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a4, v9 ; RV64ZVE32F-NEXT: and a4, a4, a5 @@ -5908,7 +5908,7 @@ ; RV64ZVE32F-NEXT: add a4, a1, a4 ; RV64ZVE32F-NEXT: ld a4, 0(a4) ; RV64ZVE32F-NEXT: .LBB53_5: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a7, a6, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: beqz a7, .LBB53_7 @@ -5922,12 +5922,12 @@ ; RV64ZVE32F-NEXT: .LBB53_7: ; RV64ZVE32F-NEXT: ld a7, 16(a2) ; RV64ZVE32F-NEXT: .LBB53_8: # %else5 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; RV64ZVE32F-NEXT: andi t0, a6, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: beqz t0, .LBB53_12 ; RV64ZVE32F-NEXT: # %bb.9: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s t0, v9 ; RV64ZVE32F-NEXT: and t0, t0, a5 @@ -5948,7 +5948,7 @@ ; RV64ZVE32F-NEXT: andi t1, a6, 16 ; RV64ZVE32F-NEXT: beqz t1, .LBB53_10 ; RV64ZVE32F-NEXT: .LBB53_13: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s t1, v8 ; RV64ZVE32F-NEXT: and t1, t1, a5 ; RV64ZVE32F-NEXT: slli t1, t1, 3 @@ -5957,7 +5957,7 @@ ; RV64ZVE32F-NEXT: andi t2, a6, 32 ; RV64ZVE32F-NEXT: beqz t2, .LBB53_11 ; RV64ZVE32F-NEXT: .LBB53_14: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s t2, v9 ; RV64ZVE32F-NEXT: and t2, t2, a5 @@ -5965,7 +5965,7 @@ ; RV64ZVE32F-NEXT: add t2, a1, t2 ; RV64ZVE32F-NEXT: ld t2, 0(t2) ; RV64ZVE32F-NEXT: .LBB53_15: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi t3, a6, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: beqz t3, .LBB53_18 @@ -5985,7 +5985,7 @@ ; RV64ZVE32F-NEXT: andi a6, a6, -128 ; RV64ZVE32F-NEXT: beqz a6, .LBB53_17 ; RV64ZVE32F-NEXT: .LBB53_19: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: and a2, a2, a5 @@ -6011,7 +6011,7 @@ define <8 x i64> @mgather_baseidx_v8i32_v8i64(i64* %base, <8 x i32> %idxs, <8 x i1> %m, <8 x i64> %passthru) { ; RV32V-LABEL: mgather_baseidx_v8i32_v8i64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vsll.vi v8, v8, 3 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32V-NEXT: vluxei32.v v12, (a0), v8, v0.t @@ -6035,15 +6035,15 @@ ; RV32ZVE32F-NEXT: sw s1, 8(sp) # 4-byte Folded Spill ; RV32ZVE32F-NEXT: .cfi_offset s0, -4 ; RV32ZVE32F-NEXT: .cfi_offset s1, -8 -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma 
; RV32ZVE32F-NEXT: vmv.x.s t0, v0 ; RV32ZVE32F-NEXT: andi a1, t0, 1 ; RV32ZVE32F-NEXT: beqz a1, .LBB54_9 ; RV32ZVE32F-NEXT: # %bb.1: # %cond.load -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a3, v8 ; RV32ZVE32F-NEXT: lw a1, 4(a3) ; RV32ZVE32F-NEXT: lw a3, 0(a3) @@ -6089,7 +6089,7 @@ ; RV32ZVE32F-NEXT: andi a4, t0, 2 ; RV32ZVE32F-NEXT: beqz a4, .LBB54_2 ; RV32ZVE32F-NEXT: .LBB54_10: # %cond.load1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a5, v10 ; RV32ZVE32F-NEXT: lw a4, 4(a5) @@ -6097,7 +6097,7 @@ ; RV32ZVE32F-NEXT: andi a6, t0, 4 ; RV32ZVE32F-NEXT: beqz a6, .LBB54_3 ; RV32ZVE32F-NEXT: .LBB54_11: # %cond.load4 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a7, v10 ; RV32ZVE32F-NEXT: lw a6, 4(a7) @@ -6105,7 +6105,7 @@ ; RV32ZVE32F-NEXT: andi t1, t0, 8 ; RV32ZVE32F-NEXT: beqz t1, .LBB54_4 ; RV32ZVE32F-NEXT: .LBB54_12: # %cond.load7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s t2, v10 ; RV32ZVE32F-NEXT: lw t1, 4(t2) @@ -6113,7 +6113,7 @@ ; RV32ZVE32F-NEXT: andi t3, t0, 16 ; RV32ZVE32F-NEXT: beqz t3, .LBB54_5 ; RV32ZVE32F-NEXT: .LBB54_13: # %cond.load10 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s t4, v10 ; RV32ZVE32F-NEXT: lw t3, 4(t4) @@ -6121,7 +6121,7 @@ ; RV32ZVE32F-NEXT: andi t5, t0, 32 ; RV32ZVE32F-NEXT: beqz t5, .LBB54_6 ; RV32ZVE32F-NEXT: .LBB54_14: # %cond.load13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s t6, v10 ; RV32ZVE32F-NEXT: lw t5, 4(t6) @@ -6129,7 +6129,7 @@ ; RV32ZVE32F-NEXT: andi s0, t0, 64 ; RV32ZVE32F-NEXT: beqz s0, .LBB54_7 ; RV32ZVE32F-NEXT: .LBB54_15: # %cond.load16 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s s1, v10 ; RV32ZVE32F-NEXT: lw s0, 4(s1) @@ -6137,7 +6137,7 @@ ; RV32ZVE32F-NEXT: andi t0, t0, -128 ; RV32ZVE32F-NEXT: beqz t0, .LBB54_8 ; RV32ZVE32F-NEXT: .LBB54_16: # %cond.load19 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a2, v8 ; RV32ZVE32F-NEXT: lw t0, 4(a2) @@ -6166,12 +6166,12 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_v8i32_v8i64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a5, v0 ; RV64ZVE32F-NEXT: andi a3, a5, 1 ; RV64ZVE32F-NEXT: beqz a3, .LBB54_3 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v8 ; RV64ZVE32F-NEXT: slli a3, a3, 3 ; RV64ZVE32F-NEXT: add a3, a1, a3 @@ -6186,16 +6186,16 @@ ; RV64ZVE32F-NEXT: andi a4, a5, 2 ; RV64ZVE32F-NEXT: beqz a4, .LBB54_2 ; RV64ZVE32F-NEXT: .LBB54_4: # 
%cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a4, v10 ; RV64ZVE32F-NEXT: slli a4, a4, 3 ; RV64ZVE32F-NEXT: add a4, a1, a4 ; RV64ZVE32F-NEXT: ld a4, 0(a4) ; RV64ZVE32F-NEXT: .LBB54_5: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: andi a6, a5, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: beqz a6, .LBB54_10 @@ -6222,7 +6222,7 @@ ; RV64ZVE32F-NEXT: andi a7, a5, 8 ; RV64ZVE32F-NEXT: beqz a7, .LBB54_7 ; RV64ZVE32F-NEXT: .LBB54_11: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a7, v8 ; RV64ZVE32F-NEXT: slli a7, a7, 3 @@ -6231,7 +6231,7 @@ ; RV64ZVE32F-NEXT: andi t0, a5, 16 ; RV64ZVE32F-NEXT: beqz t0, .LBB54_8 ; RV64ZVE32F-NEXT: .LBB54_12: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s t0, v10 ; RV64ZVE32F-NEXT: slli t0, t0, 3 ; RV64ZVE32F-NEXT: add t0, a1, t0 @@ -6239,14 +6239,14 @@ ; RV64ZVE32F-NEXT: andi t1, a5, 32 ; RV64ZVE32F-NEXT: beqz t1, .LBB54_9 ; RV64ZVE32F-NEXT: .LBB54_13: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s t1, v8 ; RV64ZVE32F-NEXT: slli t1, t1, 3 ; RV64ZVE32F-NEXT: add t1, a1, t1 ; RV64ZVE32F-NEXT: ld t1, 0(t1) ; RV64ZVE32F-NEXT: .LBB54_14: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: andi t2, a5, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2 ; RV64ZVE32F-NEXT: beqz t2, .LBB54_17 @@ -6265,7 +6265,7 @@ ; RV64ZVE32F-NEXT: andi a5, a5, -128 ; RV64ZVE32F-NEXT: beqz a5, .LBB54_16 ; RV64ZVE32F-NEXT: .LBB54_18: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 3 @@ -6289,7 +6289,7 @@ define <8 x i64> @mgather_baseidx_sext_v8i32_v8i64(i64* %base, <8 x i32> %idxs, <8 x i1> %m, <8 x i64> %passthru) { ; RV32V-LABEL: mgather_baseidx_sext_v8i32_v8i64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vsll.vi v8, v8, 3 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32V-NEXT: vluxei32.v v12, (a0), v8, v0.t @@ -6313,15 +6313,15 @@ ; RV32ZVE32F-NEXT: sw s1, 8(sp) # 4-byte Folded Spill ; RV32ZVE32F-NEXT: .cfi_offset s0, -4 ; RV32ZVE32F-NEXT: .cfi_offset s1, -8 -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s t0, v0 ; RV32ZVE32F-NEXT: andi a1, t0, 1 ; RV32ZVE32F-NEXT: beqz a1, .LBB55_9 ; RV32ZVE32F-NEXT: # %bb.1: # %cond.load -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; 
RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a3, v8 ; RV32ZVE32F-NEXT: lw a1, 4(a3) ; RV32ZVE32F-NEXT: lw a3, 0(a3) @@ -6367,7 +6367,7 @@ ; RV32ZVE32F-NEXT: andi a4, t0, 2 ; RV32ZVE32F-NEXT: beqz a4, .LBB55_2 ; RV32ZVE32F-NEXT: .LBB55_10: # %cond.load1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a5, v10 ; RV32ZVE32F-NEXT: lw a4, 4(a5) @@ -6375,7 +6375,7 @@ ; RV32ZVE32F-NEXT: andi a6, t0, 4 ; RV32ZVE32F-NEXT: beqz a6, .LBB55_3 ; RV32ZVE32F-NEXT: .LBB55_11: # %cond.load4 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a7, v10 ; RV32ZVE32F-NEXT: lw a6, 4(a7) @@ -6383,7 +6383,7 @@ ; RV32ZVE32F-NEXT: andi t1, t0, 8 ; RV32ZVE32F-NEXT: beqz t1, .LBB55_4 ; RV32ZVE32F-NEXT: .LBB55_12: # %cond.load7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s t2, v10 ; RV32ZVE32F-NEXT: lw t1, 4(t2) @@ -6391,7 +6391,7 @@ ; RV32ZVE32F-NEXT: andi t3, t0, 16 ; RV32ZVE32F-NEXT: beqz t3, .LBB55_5 ; RV32ZVE32F-NEXT: .LBB55_13: # %cond.load10 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s t4, v10 ; RV32ZVE32F-NEXT: lw t3, 4(t4) @@ -6399,7 +6399,7 @@ ; RV32ZVE32F-NEXT: andi t5, t0, 32 ; RV32ZVE32F-NEXT: beqz t5, .LBB55_6 ; RV32ZVE32F-NEXT: .LBB55_14: # %cond.load13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s t6, v10 ; RV32ZVE32F-NEXT: lw t5, 4(t6) @@ -6407,7 +6407,7 @@ ; RV32ZVE32F-NEXT: andi s0, t0, 64 ; RV32ZVE32F-NEXT: beqz s0, .LBB55_7 ; RV32ZVE32F-NEXT: .LBB55_15: # %cond.load16 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s s1, v10 ; RV32ZVE32F-NEXT: lw s0, 4(s1) @@ -6415,7 +6415,7 @@ ; RV32ZVE32F-NEXT: andi t0, t0, -128 ; RV32ZVE32F-NEXT: beqz t0, .LBB55_8 ; RV32ZVE32F-NEXT: .LBB55_16: # %cond.load19 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a2, v8 ; RV32ZVE32F-NEXT: lw t0, 4(a2) @@ -6444,12 +6444,12 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_sext_v8i32_v8i64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a5, v0 ; RV64ZVE32F-NEXT: andi a3, a5, 1 ; RV64ZVE32F-NEXT: beqz a3, .LBB55_3 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v8 ; RV64ZVE32F-NEXT: slli a3, a3, 3 ; RV64ZVE32F-NEXT: add a3, a1, a3 @@ -6464,16 +6464,16 @@ ; RV64ZVE32F-NEXT: andi a4, a5, 2 ; RV64ZVE32F-NEXT: beqz a4, .LBB55_2 ; RV64ZVE32F-NEXT: .LBB55_4: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a4, v10 ; 
RV64ZVE32F-NEXT: slli a4, a4, 3 ; RV64ZVE32F-NEXT: add a4, a1, a4 ; RV64ZVE32F-NEXT: ld a4, 0(a4) ; RV64ZVE32F-NEXT: .LBB55_5: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: andi a6, a5, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: beqz a6, .LBB55_10 @@ -6500,7 +6500,7 @@ ; RV64ZVE32F-NEXT: andi a7, a5, 8 ; RV64ZVE32F-NEXT: beqz a7, .LBB55_7 ; RV64ZVE32F-NEXT: .LBB55_11: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a7, v8 ; RV64ZVE32F-NEXT: slli a7, a7, 3 @@ -6509,7 +6509,7 @@ ; RV64ZVE32F-NEXT: andi t0, a5, 16 ; RV64ZVE32F-NEXT: beqz t0, .LBB55_8 ; RV64ZVE32F-NEXT: .LBB55_12: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s t0, v10 ; RV64ZVE32F-NEXT: slli t0, t0, 3 ; RV64ZVE32F-NEXT: add t0, a1, t0 @@ -6517,14 +6517,14 @@ ; RV64ZVE32F-NEXT: andi t1, a5, 32 ; RV64ZVE32F-NEXT: beqz t1, .LBB55_9 ; RV64ZVE32F-NEXT: .LBB55_13: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s t1, v8 ; RV64ZVE32F-NEXT: slli t1, t1, 3 ; RV64ZVE32F-NEXT: add t1, a1, t1 ; RV64ZVE32F-NEXT: ld t1, 0(t1) ; RV64ZVE32F-NEXT: .LBB55_14: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: andi t2, a5, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2 ; RV64ZVE32F-NEXT: beqz t2, .LBB55_17 @@ -6543,7 +6543,7 @@ ; RV64ZVE32F-NEXT: andi a5, a5, -128 ; RV64ZVE32F-NEXT: beqz a5, .LBB55_16 ; RV64ZVE32F-NEXT: .LBB55_18: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 3 @@ -6568,7 +6568,7 @@ define <8 x i64> @mgather_baseidx_zext_v8i32_v8i64(i64* %base, <8 x i32> %idxs, <8 x i1> %m, <8 x i64> %passthru) { ; RV32V-LABEL: mgather_baseidx_zext_v8i32_v8i64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vsll.vi v8, v8, 3 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32V-NEXT: vluxei32.v v12, (a0), v8, v0.t @@ -6592,15 +6592,15 @@ ; RV32ZVE32F-NEXT: sw s1, 8(sp) # 4-byte Folded Spill ; RV32ZVE32F-NEXT: .cfi_offset s0, -4 ; RV32ZVE32F-NEXT: .cfi_offset s1, -8 -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s t0, v0 ; RV32ZVE32F-NEXT: andi a1, t0, 1 ; RV32ZVE32F-NEXT: beqz a1, .LBB56_9 ; RV32ZVE32F-NEXT: # %bb.1: # %cond.load -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a3, v8 ; RV32ZVE32F-NEXT: lw a1, 4(a3) ; RV32ZVE32F-NEXT: lw a3, 0(a3) @@ -6646,7 +6646,7 @@ ; RV32ZVE32F-NEXT: andi a4, t0, 2 ; 
RV32ZVE32F-NEXT: beqz a4, .LBB56_2 ; RV32ZVE32F-NEXT: .LBB56_10: # %cond.load1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a5, v10 ; RV32ZVE32F-NEXT: lw a4, 4(a5) @@ -6654,7 +6654,7 @@ ; RV32ZVE32F-NEXT: andi a6, t0, 4 ; RV32ZVE32F-NEXT: beqz a6, .LBB56_3 ; RV32ZVE32F-NEXT: .LBB56_11: # %cond.load4 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a7, v10 ; RV32ZVE32F-NEXT: lw a6, 4(a7) @@ -6662,7 +6662,7 @@ ; RV32ZVE32F-NEXT: andi t1, t0, 8 ; RV32ZVE32F-NEXT: beqz t1, .LBB56_4 ; RV32ZVE32F-NEXT: .LBB56_12: # %cond.load7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s t2, v10 ; RV32ZVE32F-NEXT: lw t1, 4(t2) @@ -6670,7 +6670,7 @@ ; RV32ZVE32F-NEXT: andi t3, t0, 16 ; RV32ZVE32F-NEXT: beqz t3, .LBB56_5 ; RV32ZVE32F-NEXT: .LBB56_13: # %cond.load10 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s t4, v10 ; RV32ZVE32F-NEXT: lw t3, 4(t4) @@ -6678,7 +6678,7 @@ ; RV32ZVE32F-NEXT: andi t5, t0, 32 ; RV32ZVE32F-NEXT: beqz t5, .LBB56_6 ; RV32ZVE32F-NEXT: .LBB56_14: # %cond.load13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s t6, v10 ; RV32ZVE32F-NEXT: lw t5, 4(t6) @@ -6686,7 +6686,7 @@ ; RV32ZVE32F-NEXT: andi s0, t0, 64 ; RV32ZVE32F-NEXT: beqz s0, .LBB56_7 ; RV32ZVE32F-NEXT: .LBB56_15: # %cond.load16 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s s1, v10 ; RV32ZVE32F-NEXT: lw s0, 4(s1) @@ -6694,7 +6694,7 @@ ; RV32ZVE32F-NEXT: andi t0, t0, -128 ; RV32ZVE32F-NEXT: beqz t0, .LBB56_8 ; RV32ZVE32F-NEXT: .LBB56_16: # %cond.load19 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a2, v8 ; RV32ZVE32F-NEXT: lw t0, 4(a2) @@ -6723,12 +6723,12 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_zext_v8i32_v8i64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a5, v0 ; RV64ZVE32F-NEXT: andi a3, a5, 1 ; RV64ZVE32F-NEXT: beqz a3, .LBB56_3 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v8 ; RV64ZVE32F-NEXT: slli a3, a3, 32 ; RV64ZVE32F-NEXT: srli a3, a3, 29 @@ -6744,7 +6744,7 @@ ; RV64ZVE32F-NEXT: andi a4, a5, 2 ; RV64ZVE32F-NEXT: beqz a4, .LBB56_2 ; RV64ZVE32F-NEXT: .LBB56_4: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a4, v10 ; RV64ZVE32F-NEXT: slli a4, a4, 32 @@ -6752,9 +6752,9 @@ ; RV64ZVE32F-NEXT: add a4, a1, a4 ; RV64ZVE32F-NEXT: ld a4, 0(a4) ; RV64ZVE32F-NEXT: .LBB56_5: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, mu 
+; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: andi a6, a5, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: beqz a6, .LBB56_10 @@ -6782,7 +6782,7 @@ ; RV64ZVE32F-NEXT: andi a7, a5, 8 ; RV64ZVE32F-NEXT: beqz a7, .LBB56_7 ; RV64ZVE32F-NEXT: .LBB56_11: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a7, v8 ; RV64ZVE32F-NEXT: slli a7, a7, 32 @@ -6792,7 +6792,7 @@ ; RV64ZVE32F-NEXT: andi t0, a5, 16 ; RV64ZVE32F-NEXT: beqz t0, .LBB56_8 ; RV64ZVE32F-NEXT: .LBB56_12: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s t0, v10 ; RV64ZVE32F-NEXT: slli t0, t0, 32 ; RV64ZVE32F-NEXT: srli t0, t0, 29 @@ -6801,7 +6801,7 @@ ; RV64ZVE32F-NEXT: andi t1, a5, 32 ; RV64ZVE32F-NEXT: beqz t1, .LBB56_9 ; RV64ZVE32F-NEXT: .LBB56_13: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s t1, v8 ; RV64ZVE32F-NEXT: slli t1, t1, 32 @@ -6809,7 +6809,7 @@ ; RV64ZVE32F-NEXT: add t1, a1, t1 ; RV64ZVE32F-NEXT: ld t1, 0(t1) ; RV64ZVE32F-NEXT: .LBB56_14: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: andi t2, a5, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2 ; RV64ZVE32F-NEXT: beqz t2, .LBB56_17 @@ -6829,7 +6829,7 @@ ; RV64ZVE32F-NEXT: andi a5, a5, -128 ; RV64ZVE32F-NEXT: beqz a5, .LBB56_16 ; RV64ZVE32F-NEXT: .LBB56_18: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 32 @@ -6855,7 +6855,7 @@ define <8 x i64> @mgather_baseidx_v8i64(i64* %base, <8 x i64> %idxs, <8 x i1> %m, <8 x i64> %passthru) { ; RV32V-LABEL: mgather_baseidx_v8i64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vnsrl.wi v16, v8, 0 ; RV32V-NEXT: vsll.vi v8, v16, 3 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu @@ -6935,11 +6935,11 @@ ; RV32ZVE32F-NEXT: sw s7, 4(sp) ; RV32ZVE32F-NEXT: sw s6, 0(sp) ; RV32ZVE32F-NEXT: mv a2, sp -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vle32.v v8, (a2) ; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a1, v0 ; RV32ZVE32F-NEXT: andi a2, a1, 1 ; RV32ZVE32F-NEXT: bnez a2, .LBB57_10 @@ -6965,7 +6965,7 @@ ; RV32ZVE32F-NEXT: andi a1, a1, -128 ; RV32ZVE32F-NEXT: beqz a1, .LBB57_9 ; RV32ZVE32F-NEXT: .LBB57_8: # %cond.load19 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: lw a4, 4(a1) @@ -7003,14 +7003,14 @@ ; RV32ZVE32F-NEXT: addi sp, sp, 96 ; RV32ZVE32F-NEXT: ret ; RV32ZVE32F-NEXT: .LBB57_10: # %cond.load -; RV32ZVE32F-NEXT: 
vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a2, v8 ; RV32ZVE32F-NEXT: lw s4, 4(a2) ; RV32ZVE32F-NEXT: lw a3, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 2 ; RV32ZVE32F-NEXT: beqz a2, .LBB57_2 ; RV32ZVE32F-NEXT: .LBB57_11: # %cond.load1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: lw s3, 4(a2) @@ -7018,7 +7018,7 @@ ; RV32ZVE32F-NEXT: andi a2, a1, 4 ; RV32ZVE32F-NEXT: beqz a2, .LBB57_3 ; RV32ZVE32F-NEXT: .LBB57_12: # %cond.load4 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: lw t6, 4(a2) @@ -7026,7 +7026,7 @@ ; RV32ZVE32F-NEXT: andi a2, a1, 8 ; RV32ZVE32F-NEXT: beqz a2, .LBB57_4 ; RV32ZVE32F-NEXT: .LBB57_13: # %cond.load7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: lw t4, 4(a2) @@ -7034,7 +7034,7 @@ ; RV32ZVE32F-NEXT: andi a2, a1, 16 ; RV32ZVE32F-NEXT: beqz a2, .LBB57_5 ; RV32ZVE32F-NEXT: .LBB57_14: # %cond.load10 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: lw t2, 4(a2) @@ -7042,7 +7042,7 @@ ; RV32ZVE32F-NEXT: andi a2, a1, 32 ; RV32ZVE32F-NEXT: beqz a2, .LBB57_6 ; RV32ZVE32F-NEXT: .LBB57_15: # %cond.load13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: lw t0, 4(a2) @@ -7050,7 +7050,7 @@ ; RV32ZVE32F-NEXT: andi a2, a1, 64 ; RV32ZVE32F-NEXT: beqz a2, .LBB57_7 ; RV32ZVE32F-NEXT: .LBB57_16: # %cond.load16 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: lw a6, 4(a2) @@ -7061,7 +7061,7 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_v8i64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a7, v0 ; RV64ZVE32F-NEXT: andi a4, a7, 1 ; RV64ZVE32F-NEXT: beqz a4, .LBB57_9 @@ -7191,14 +7191,14 @@ ; ; RV64ZVE32F-LABEL: mgather_v1f16: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.v.i v9, 0 ; RV64ZVE32F-NEXT: vmerge.vim v9, v9, 1, v0 ; RV64ZVE32F-NEXT: vmv.x.s a1, v9 ; RV64ZVE32F-NEXT: andi a1, a1, 1 ; RV64ZVE32F-NEXT: beqz a1, .LBB58_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vlse16.v v8, (a0), zero ; RV64ZVE32F-NEXT: .LBB58_2: # %else ; RV64ZVE32F-NEXT: ret @@ -7232,7 +7232,7 @@ ; ; RV64ZVE32F-LABEL: mgather_v2f16: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: bnez a3, .LBB59_3 @@ -7243,15 +7243,15 
@@ ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB59_3: # %cond.load ; RV64ZVE32F-NEXT: flh ft0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, tu, ma ; RV64ZVE32F-NEXT: vfmv.s.f v8, ft0 ; RV64ZVE32F-NEXT: andi a0, a2, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB59_2 ; RV64ZVE32F-NEXT: .LBB59_4: # %cond.load1 ; RV64ZVE32F-NEXT: flh ft0, 0(a1) -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v9, ft0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 ; RV64ZVE32F-NEXT: ret %v = call <2 x half> @llvm.masked.gather.v2f16.v2p0f16(<2 x half*> %ptrs, i32 2, <2 x i1> %m, <2 x half> %passthru) @@ -7277,7 +7277,7 @@ ; ; RV64ZVE32F-LABEL: mgather_v4f16: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: bnez a2, .LBB60_5 @@ -7295,34 +7295,34 @@ ; RV64ZVE32F-NEXT: .LBB60_5: # %cond.load ; RV64ZVE32F-NEXT: ld a2, 0(a0) ; RV64ZVE32F-NEXT: flh ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, tu, ma ; RV64ZVE32F-NEXT: vfmv.s.f v8, ft0 ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB60_2 ; RV64ZVE32F-NEXT: .LBB60_6: # %cond.load1 ; RV64ZVE32F-NEXT: ld a2, 8(a0) ; RV64ZVE32F-NEXT: flh ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v9, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: beqz a2, .LBB60_3 ; RV64ZVE32F-NEXT: .LBB60_7: # %cond.load4 ; RV64ZVE32F-NEXT: ld a2, 16(a0) ; RV64ZVE32F-NEXT: flh ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v9, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, mf2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 2 ; RV64ZVE32F-NEXT: andi a1, a1, 8 ; RV64ZVE32F-NEXT: beqz a1, .LBB60_4 ; RV64ZVE32F-NEXT: .LBB60_8: # %cond.load7 ; RV64ZVE32F-NEXT: ld a0, 24(a0) ; RV64ZVE32F-NEXT: flh ft0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v9, ft0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 3 ; RV64ZVE32F-NEXT: ret %v = call <4 x half> @llvm.masked.gather.v4f16.v4p0f16(<4 x half*> %ptrs, i32 2, <4 x i1> %m, <4 x half> %passthru) @@ -7332,21 +7332,21 @@ define <4 x half> @mgather_truemask_v4f16(<4 x half*> %ptrs, <4 x half> %passthru) { ; RV32-LABEL: mgather_truemask_v4f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV32-NEXT: vluxei32.v v9, (zero), v8 ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64V-LABEL: mgather_truemask_v4f16: ; RV64V: # %bb.0: -; RV64V-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV64V-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV64V-NEXT: vluxei64.v v10, (zero), v8 ; RV64V-NEXT: vmv1r.v 
v8, v10 ; RV64V-NEXT: ret ; ; RV64ZVE32F-LABEL: mgather_truemask_v4f16: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vmset.m v9 ; RV64ZVE32F-NEXT: vmv.x.s a1, v9 ; RV64ZVE32F-NEXT: beqz zero, .LBB61_5 @@ -7364,34 +7364,34 @@ ; RV64ZVE32F-NEXT: .LBB61_5: # %cond.load ; RV64ZVE32F-NEXT: ld a2, 0(a0) ; RV64ZVE32F-NEXT: flh ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, tu, ma ; RV64ZVE32F-NEXT: vfmv.s.f v8, ft0 ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB61_2 ; RV64ZVE32F-NEXT: .LBB61_6: # %cond.load1 ; RV64ZVE32F-NEXT: ld a2, 8(a0) ; RV64ZVE32F-NEXT: flh ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v9, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: beqz a2, .LBB61_3 ; RV64ZVE32F-NEXT: .LBB61_7: # %cond.load4 ; RV64ZVE32F-NEXT: ld a2, 16(a0) ; RV64ZVE32F-NEXT: flh ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v9, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, mf2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 2 ; RV64ZVE32F-NEXT: andi a1, a1, 8 ; RV64ZVE32F-NEXT: beqz a1, .LBB61_4 ; RV64ZVE32F-NEXT: .LBB61_8: # %cond.load7 ; RV64ZVE32F-NEXT: ld a0, 24(a0) ; RV64ZVE32F-NEXT: flh ft0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v9, ft0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 3 ; RV64ZVE32F-NEXT: ret %mhead = insertelement <4 x i1> poison, i1 1, i32 0 @@ -7437,7 +7437,7 @@ ; ; RV64ZVE32F-LABEL: mgather_v8f16: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: bnez a2, .LBB63_9 @@ -7467,70 +7467,70 @@ ; RV64ZVE32F-NEXT: .LBB63_9: # %cond.load ; RV64ZVE32F-NEXT: ld a2, 0(a0) ; RV64ZVE32F-NEXT: flh ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vfmv.s.f v8, ft0 ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB63_2 ; RV64ZVE32F-NEXT: .LBB63_10: # %cond.load1 ; RV64ZVE32F-NEXT: ld a2, 8(a0) ; RV64ZVE32F-NEXT: flh ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v9, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: beqz a2, .LBB63_3 ; RV64ZVE32F-NEXT: .LBB63_11: # %cond.load4 ; RV64ZVE32F-NEXT: ld a2, 16(a0) ; RV64ZVE32F-NEXT: flh ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v9, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli 
zero, 3, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 2 ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: beqz a2, .LBB63_4 ; RV64ZVE32F-NEXT: .LBB63_12: # %cond.load7 ; RV64ZVE32F-NEXT: ld a2, 24(a0) ; RV64ZVE32F-NEXT: flh ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v9, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB63_5 ; RV64ZVE32F-NEXT: .LBB63_13: # %cond.load10 ; RV64ZVE32F-NEXT: ld a2, 32(a0) ; RV64ZVE32F-NEXT: flh ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v9, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 4 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB63_6 ; RV64ZVE32F-NEXT: .LBB63_14: # %cond.load13 ; RV64ZVE32F-NEXT: ld a2, 40(a0) ; RV64ZVE32F-NEXT: flh ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v9, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 5 ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: beqz a2, .LBB63_7 ; RV64ZVE32F-NEXT: .LBB63_15: # %cond.load16 ; RV64ZVE32F-NEXT: ld a2, 48(a0) ; RV64ZVE32F-NEXT: flh ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v9, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 6 ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB63_8 ; RV64ZVE32F-NEXT: .LBB63_16: # %cond.load19 ; RV64ZVE32F-NEXT: ld a0, 56(a0) ; RV64ZVE32F-NEXT: flh ft0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v9, ft0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 7 ; RV64ZVE32F-NEXT: ret %v = call <8 x half> @llvm.masked.gather.v8f16.v8p0f16(<8 x half*> %ptrs, i32 2, <8 x i1> %m, <8 x half> %passthru) @@ -7540,7 +7540,7 @@ define <8 x half> @mgather_baseidx_v8i8_v8f16(half* %base, <8 x i8> %idxs, <8 x i1> %m, <8 x half> %passthru) { ; RV32-LABEL: mgather_baseidx_v8i8_v8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v10, v8 ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu @@ -7550,7 +7550,7 @@ ; ; RV64V-LABEL: mgather_baseidx_v8i8_v8f16: ; RV64V: # %bb.0: -; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64V-NEXT: vsext.vf8 v12, v8 ; RV64V-NEXT: vadd.vv v12, v12, v12 ; RV64V-NEXT: vsetvli zero, zero, e16, m1, ta, mu @@ -7560,7 +7560,7 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_v8i8_v8f16: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: 
andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB64_2 @@ -7569,24 +7569,24 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flh ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vfmv.s.f v9, ft0 ; RV64ZVE32F-NEXT: .LBB64_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB64_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flh ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 1 ; RV64ZVE32F-NEXT: .LBB64_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB64_6 @@ -7595,12 +7595,12 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flh ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v11, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 2 ; RV64ZVE32F-NEXT: .LBB64_6: # %else5 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB64_13 @@ -7611,18 +7611,18 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB64_10 ; RV64ZVE32F-NEXT: .LBB64_9: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flh ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 5 ; RV64ZVE32F-NEXT: .LBB64_10: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB64_15 @@ -7633,27 +7633,27 @@ ; RV64ZVE32F-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB64_13: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flh ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 -; 
RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB64_8 ; RV64ZVE32F-NEXT: .LBB64_14: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flh ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 4 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB64_9 @@ -7663,22 +7663,22 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flh ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6 ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB64_12 ; RV64ZVE32F-NEXT: .LBB64_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v8 ; RV64ZVE32F-NEXT: slli a1, a1, 1 ; RV64ZVE32F-NEXT: add a0, a0, a1 ; RV64ZVE32F-NEXT: flh ft0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v8, ft0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7 ; RV64ZVE32F-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-NEXT: ret @@ -7690,7 +7690,7 @@ define <8 x half> @mgather_baseidx_sext_v8i8_v8f16(half* %base, <8 x i8> %idxs, <8 x i1> %m, <8 x half> %passthru) { ; RV32-LABEL: mgather_baseidx_sext_v8i8_v8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v10, v8 ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu @@ -7700,7 +7700,7 @@ ; ; RV64V-LABEL: mgather_baseidx_sext_v8i8_v8f16: ; RV64V: # %bb.0: -; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64V-NEXT: vsext.vf8 v12, v8 ; RV64V-NEXT: vadd.vv v12, v12, v12 ; RV64V-NEXT: vsetvli zero, zero, e16, m1, ta, mu @@ -7710,7 +7710,7 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_sext_v8i8_v8f16: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB65_2 @@ -7719,24 +7719,24 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flh ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vfmv.s.f v9, ft0 ; RV64ZVE32F-NEXT: .LBB65_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB65_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, 
mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flh ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 1 ; RV64ZVE32F-NEXT: .LBB65_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB65_6 @@ -7745,12 +7745,12 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flh ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v11, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 2 ; RV64ZVE32F-NEXT: .LBB65_6: # %else5 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB65_13 @@ -7761,18 +7761,18 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB65_10 ; RV64ZVE32F-NEXT: .LBB65_9: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flh ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 5 ; RV64ZVE32F-NEXT: .LBB65_10: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB65_15 @@ -7783,27 +7783,27 @@ ; RV64ZVE32F-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB65_13: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flh ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB65_8 ; RV64ZVE32F-NEXT: .LBB65_14: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flh ft0, 0(a2) -; 
RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 4 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB65_9 @@ -7813,22 +7813,22 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flh ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6 ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB65_12 ; RV64ZVE32F-NEXT: .LBB65_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v8 ; RV64ZVE32F-NEXT: slli a1, a1, 1 ; RV64ZVE32F-NEXT: add a0, a0, a1 ; RV64ZVE32F-NEXT: flh ft0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v8, ft0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7 ; RV64ZVE32F-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-NEXT: ret @@ -7841,7 +7841,7 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(half* %base, <8 x i8> %idxs, <8 x i1> %m, <8 x half> %passthru) { ; RV32-LABEL: mgather_baseidx_zext_v8i8_v8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vzext.vf4 v10, v8 ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu @@ -7851,7 +7851,7 @@ ; ; RV64V-LABEL: mgather_baseidx_zext_v8i8_v8f16: ; RV64V: # %bb.0: -; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64V-NEXT: vzext.vf8 v12, v8 ; RV64V-NEXT: vadd.vv v12, v12, v12 ; RV64V-NEXT: vsetvli zero, zero, e16, m1, ta, mu @@ -7861,7 +7861,7 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_zext_v8i8_v8f16: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB66_2 @@ -7871,25 +7871,25 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flh ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vfmv.s.f v9, ft0 ; RV64ZVE32F-NEXT: .LBB66_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB66_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flh ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli 
zero, 2, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 1 ; RV64ZVE32F-NEXT: .LBB66_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB66_6 @@ -7899,12 +7899,12 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flh ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v11, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 2 ; RV64ZVE32F-NEXT: .LBB66_6: # %else5 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB66_13 @@ -7915,19 +7915,19 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB66_10 ; RV64ZVE32F-NEXT: .LBB66_9: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flh ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 5 ; RV64ZVE32F-NEXT: .LBB66_10: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB66_15 @@ -7938,29 +7938,29 @@ ; RV64ZVE32F-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB66_13: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flh ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB66_8 ; RV64ZVE32F-NEXT: .LBB66_14: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flh ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 4 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB66_9 @@ 
-7971,23 +7971,23 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flh ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6 ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB66_12 ; RV64ZVE32F-NEXT: .LBB66_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v8 ; RV64ZVE32F-NEXT: andi a1, a1, 255 ; RV64ZVE32F-NEXT: slli a1, a1, 1 ; RV64ZVE32F-NEXT: add a0, a0, a1 ; RV64ZVE32F-NEXT: flh ft0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v8, ft0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7 ; RV64ZVE32F-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-NEXT: ret @@ -8000,7 +8000,7 @@ define <8 x half> @mgather_baseidx_v8f16(half* %base, <8 x i16> %idxs, <8 x i1> %m, <8 x half> %passthru) { ; RV32-LABEL: mgather_baseidx_v8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf2 v10, v8 ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu @@ -8010,7 +8010,7 @@ ; ; RV64V-LABEL: mgather_baseidx_v8f16: ; RV64V: # %bb.0: -; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64V-NEXT: vsext.vf4 v12, v8 ; RV64V-NEXT: vadd.vv v12, v12, v12 ; RV64V-NEXT: vsetvli zero, zero, e16, m1, ta, mu @@ -8020,33 +8020,33 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_v8f16: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB67_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flh ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vfmv.s.f v9, ft0 ; RV64ZVE32F-NEXT: .LBB67_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB67_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flh ft0, 0(a2) ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 1 ; RV64ZVE32F-NEXT: .LBB67_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB67_6 @@ -8056,10 
+8056,10 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flh ft0, 0(a2) ; RV64ZVE32F-NEXT: vfmv.s.f v11, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 2 ; RV64ZVE32F-NEXT: .LBB67_6: # %else5 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB67_13 @@ -8070,17 +8070,17 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB67_10 ; RV64ZVE32F-NEXT: .LBB67_9: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flh ft0, 0(a2) ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 5 ; RV64ZVE32F-NEXT: .LBB67_10: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB67_15 @@ -8091,26 +8091,26 @@ ; RV64ZVE32F-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB67_13: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flh ft0, 0(a2) ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB67_8 ; RV64ZVE32F-NEXT: .LBB67_14: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flh ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 5, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 4 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB67_9 @@ -8121,19 +8121,19 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flh ft0, 0(a2) ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 7, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 6 ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB67_12 ; RV64ZVE32F-NEXT: .LBB67_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v8 ; RV64ZVE32F-NEXT: slli a1, a1, 1 ; RV64ZVE32F-NEXT: add a0, a0, a1 ; RV64ZVE32F-NEXT: flh ft0, 0(a0) ; RV64ZVE32F-NEXT: vfmv.s.f v8, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 8, e16, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli 
zero, 8, e16, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 7 ; RV64ZVE32F-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-NEXT: ret @@ -8168,14 +8168,14 @@ ; ; RV64ZVE32F-LABEL: mgather_v1f32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.v.i v9, 0 ; RV64ZVE32F-NEXT: vmerge.vim v9, v9, 1, v0 ; RV64ZVE32F-NEXT: vmv.x.s a1, v9 ; RV64ZVE32F-NEXT: andi a1, a1, 1 ; RV64ZVE32F-NEXT: beqz a1, .LBB68_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vlse32.v v8, (a0), zero ; RV64ZVE32F-NEXT: .LBB68_2: # %else ; RV64ZVE32F-NEXT: ret @@ -8209,7 +8209,7 @@ ; ; RV64ZVE32F-LABEL: mgather_v2f32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: bnez a3, .LBB69_3 @@ -8220,15 +8220,15 @@ ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB69_3: # %cond.load ; RV64ZVE32F-NEXT: flw ft0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, tu, ma ; RV64ZVE32F-NEXT: vfmv.s.f v8, ft0 ; RV64ZVE32F-NEXT: andi a0, a2, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB69_2 ; RV64ZVE32F-NEXT: .LBB69_4: # %cond.load1 ; RV64ZVE32F-NEXT: flw ft0, 0(a1) -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v9, ft0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 ; RV64ZVE32F-NEXT: ret %v = call <2 x float> @llvm.masked.gather.v2f32.v2p0f32(<2 x float*> %ptrs, i32 4, <2 x i1> %m, <2 x float> %passthru) @@ -8254,7 +8254,7 @@ ; ; RV64ZVE32F-LABEL: mgather_v4f32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: bnez a2, .LBB70_5 @@ -8272,34 +8272,34 @@ ; RV64ZVE32F-NEXT: .LBB70_5: # %cond.load ; RV64ZVE32F-NEXT: ld a2, 0(a0) ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, tu, ma ; RV64ZVE32F-NEXT: vfmv.s.f v8, ft0 ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB70_2 ; RV64ZVE32F-NEXT: .LBB70_6: # %cond.load1 ; RV64ZVE32F-NEXT: ld a2, 8(a0) ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v9, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: beqz a2, .LBB70_3 ; RV64ZVE32F-NEXT: .LBB70_7: # %cond.load4 ; RV64ZVE32F-NEXT: ld a2, 16(a0) ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v9, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 2 ; RV64ZVE32F-NEXT: andi a1, a1, 8 ; RV64ZVE32F-NEXT: beqz a1, .LBB70_4 ; RV64ZVE32F-NEXT: .LBB70_8: # %cond.load7 ; RV64ZVE32F-NEXT: ld 
a0, 24(a0) ; RV64ZVE32F-NEXT: flw ft0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v9, ft0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 3 ; RV64ZVE32F-NEXT: ret %v = call <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*> %ptrs, i32 4, <4 x i1> %m, <4 x float> %passthru) @@ -8309,20 +8309,20 @@ define <4 x float> @mgather_truemask_v4f32(<4 x float*> %ptrs, <4 x float> %passthru) { ; RV32-LABEL: mgather_truemask_v4f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vluxei32.v v8, (zero), v8 ; RV32-NEXT: ret ; ; RV64V-LABEL: mgather_truemask_v4f32: ; RV64V: # %bb.0: -; RV64V-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV64V-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64V-NEXT: vluxei64.v v10, (zero), v8 ; RV64V-NEXT: vmv.v.v v8, v10 ; RV64V-NEXT: ret ; ; RV64ZVE32F-LABEL: mgather_truemask_v4f32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vmset.m v9 ; RV64ZVE32F-NEXT: vmv.x.s a1, v9 ; RV64ZVE32F-NEXT: beqz zero, .LBB71_5 @@ -8340,34 +8340,34 @@ ; RV64ZVE32F-NEXT: .LBB71_5: # %cond.load ; RV64ZVE32F-NEXT: ld a2, 0(a0) ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, tu, ma ; RV64ZVE32F-NEXT: vfmv.s.f v8, ft0 ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB71_2 ; RV64ZVE32F-NEXT: .LBB71_6: # %cond.load1 ; RV64ZVE32F-NEXT: ld a2, 8(a0) ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v9, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: beqz a2, .LBB71_3 ; RV64ZVE32F-NEXT: .LBB71_7: # %cond.load4 ; RV64ZVE32F-NEXT: ld a2, 16(a0) ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v9, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 2 ; RV64ZVE32F-NEXT: andi a1, a1, 8 ; RV64ZVE32F-NEXT: beqz a1, .LBB71_4 ; RV64ZVE32F-NEXT: .LBB71_8: # %cond.load7 ; RV64ZVE32F-NEXT: ld a0, 24(a0) ; RV64ZVE32F-NEXT: flw ft0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v9, ft0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 3 ; RV64ZVE32F-NEXT: ret %mhead = insertelement <4 x i1> poison, i1 1, i32 0 @@ -8413,7 +8413,7 @@ ; ; RV64ZVE32F-LABEL: mgather_v8f32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: bnez a2, .LBB73_9 @@ -8443,70 +8443,70 @@ ; RV64ZVE32F-NEXT: .LBB73_9: # %cond.load ; RV64ZVE32F-NEXT: ld a2, 0(a0) ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: 
vsetivli zero, 8, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vfmv.s.f v8, ft0 ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB73_2 ; RV64ZVE32F-NEXT: .LBB73_10: # %cond.load1 ; RV64ZVE32F-NEXT: ld a2, 8(a0) ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v10, 1 ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: beqz a2, .LBB73_3 ; RV64ZVE32F-NEXT: .LBB73_11: # %cond.load4 ; RV64ZVE32F-NEXT: ld a2, 16(a0) ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v10, 2 ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: beqz a2, .LBB73_4 ; RV64ZVE32F-NEXT: .LBB73_12: # %cond.load7 ; RV64ZVE32F-NEXT: ld a2, 24(a0) ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v10, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB73_5 ; RV64ZVE32F-NEXT: .LBB73_13: # %cond.load10 ; RV64ZVE32F-NEXT: ld a2, 32(a0) ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v10, 4 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB73_6 ; RV64ZVE32F-NEXT: .LBB73_14: # %cond.load13 ; RV64ZVE32F-NEXT: ld a2, 40(a0) ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v10, 5 ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: beqz a2, .LBB73_7 ; RV64ZVE32F-NEXT: .LBB73_15: # %cond.load16 ; RV64ZVE32F-NEXT: ld a2, 48(a0) ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v10, 6 ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB73_8 ; RV64ZVE32F-NEXT: .LBB73_16: # %cond.load19 ; RV64ZVE32F-NEXT: ld a0, 56(a0) ; RV64ZVE32F-NEXT: flw ft0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v10, 7 ; RV64ZVE32F-NEXT: ret %v = call <8 x float> 
@llvm.masked.gather.v8f32.v8p0f32(<8 x float*> %ptrs, i32 4, <8 x i1> %m, <8 x float> %passthru) @@ -8525,7 +8525,7 @@ ; ; RV64V-LABEL: mgather_baseidx_v8i8_v8f32: ; RV64V: # %bb.0: -; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64V-NEXT: vsext.vf8 v12, v8 ; RV64V-NEXT: vsll.vi v12, v12, 2 ; RV64V-NEXT: vsetvli zero, zero, e32, m2, ta, mu @@ -8535,7 +8535,7 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_v8i8_v8f32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB74_2 @@ -8544,24 +8544,24 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 ; RV64ZVE32F-NEXT: .LBB74_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB74_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 1 ; RV64ZVE32F-NEXT: .LBB74_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB74_6 @@ -8570,12 +8570,12 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2 ; RV64ZVE32F-NEXT: .LBB74_6: # %else5 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB74_13 @@ -8586,18 +8586,18 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB74_10 ; RV64ZVE32F-NEXT: .LBB74_9: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5 ; RV64ZVE32F-NEXT: .LBB74_10: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 
64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB74_15 @@ -8608,27 +8608,27 @@ ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB74_13: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB74_8 ; RV64ZVE32F-NEXT: .LBB74_14: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB74_9 @@ -8638,22 +8638,22 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB74_12 ; RV64ZVE32F-NEXT: .LBB74_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v8 ; RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 ; RV64ZVE32F-NEXT: flw ft0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v8, ft0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret @@ -8674,7 +8674,7 @@ ; ; RV64V-LABEL: mgather_baseidx_sext_v8i8_v8f32: ; RV64V: # %bb.0: -; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64V-NEXT: vsext.vf8 v12, v8 ; RV64V-NEXT: vsll.vi v12, v12, 2 ; RV64V-NEXT: vsetvli zero, zero, e32, m2, ta, mu @@ -8684,7 +8684,7 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_sext_v8i8_v8f32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB75_2 @@ -8693,24 +8693,24 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, 
ma ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 ; RV64ZVE32F-NEXT: .LBB75_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB75_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 1 ; RV64ZVE32F-NEXT: .LBB75_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB75_6 @@ -8719,12 +8719,12 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2 ; RV64ZVE32F-NEXT: .LBB75_6: # %else5 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB75_13 @@ -8735,18 +8735,18 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB75_10 ; RV64ZVE32F-NEXT: .LBB75_9: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5 ; RV64ZVE32F-NEXT: .LBB75_10: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB75_15 @@ -8757,27 +8757,27 @@ ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB75_13: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB75_8 ; RV64ZVE32F-NEXT: .LBB75_14: # %cond.load10 -; RV64ZVE32F-NEXT: 
vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB75_9 @@ -8787,22 +8787,22 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB75_12 ; RV64ZVE32F-NEXT: .LBB75_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v8 ; RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 ; RV64ZVE32F-NEXT: flw ft0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v8, ft0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret @@ -8824,7 +8824,7 @@ ; ; RV64V-LABEL: mgather_baseidx_zext_v8i8_v8f32: ; RV64V: # %bb.0: -; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64V-NEXT: vzext.vf8 v12, v8 ; RV64V-NEXT: vsll.vi v12, v12, 2 ; RV64V-NEXT: vsetvli zero, zero, e32, m2, ta, mu @@ -8834,7 +8834,7 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_zext_v8i8_v8f32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB76_2 @@ -8844,25 +8844,25 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 ; RV64ZVE32F-NEXT: .LBB76_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB76_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 1 ; RV64ZVE32F-NEXT: .LBB76_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli 
zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB76_6 @@ -8872,12 +8872,12 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2 ; RV64ZVE32F-NEXT: .LBB76_6: # %else5 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB76_13 @@ -8888,19 +8888,19 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB76_10 ; RV64ZVE32F-NEXT: .LBB76_9: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5 ; RV64ZVE32F-NEXT: .LBB76_10: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB76_15 @@ -8911,29 +8911,29 @@ ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB76_13: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB76_8 ; RV64ZVE32F-NEXT: .LBB76_14: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB76_9 @@ -8944,23 +8944,23 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; 
RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB76_12 ; RV64ZVE32F-NEXT: .LBB76_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v8 ; RV64ZVE32F-NEXT: andi a1, a1, 255 ; RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 ; RV64ZVE32F-NEXT: flw ft0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v8, ft0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret @@ -8982,7 +8982,7 @@ ; ; RV64V-LABEL: mgather_baseidx_v8i16_v8f32: ; RV64V: # %bb.0: -; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64V-NEXT: vsext.vf4 v12, v8 ; RV64V-NEXT: vsll.vi v12, v12, 2 ; RV64V-NEXT: vsetvli zero, zero, e32, m2, ta, mu @@ -8992,34 +8992,34 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_v8i16_v8f32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB77_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 ; RV64ZVE32F-NEXT: .LBB77_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB77_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 1 ; RV64ZVE32F-NEXT: .LBB77_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB77_6 @@ -9028,12 +9028,12 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2 ; RV64ZVE32F-NEXT: .LBB77_6: # %else5 -; 
RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB77_13 @@ -9044,18 +9044,18 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB77_10 ; RV64ZVE32F-NEXT: .LBB77_9: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5 ; RV64ZVE32F-NEXT: .LBB77_10: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB77_15 @@ -9066,27 +9066,27 @@ ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB77_13: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB77_8 ; RV64ZVE32F-NEXT: .LBB77_14: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB77_9 @@ -9096,22 +9096,22 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB77_12 ; RV64ZVE32F-NEXT: .LBB77_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v8 ; RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 ; RV64ZVE32F-NEXT: flw ft0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; 
RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v8, ft0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret @@ -9132,7 +9132,7 @@ ; ; RV64V-LABEL: mgather_baseidx_sext_v8i16_v8f32: ; RV64V: # %bb.0: -; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64V-NEXT: vsext.vf4 v12, v8 ; RV64V-NEXT: vsll.vi v12, v12, 2 ; RV64V-NEXT: vsetvli zero, zero, e32, m2, ta, mu @@ -9142,34 +9142,34 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_sext_v8i16_v8f32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB78_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 ; RV64ZVE32F-NEXT: .LBB78_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB78_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 1 ; RV64ZVE32F-NEXT: .LBB78_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB78_6 @@ -9178,12 +9178,12 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2 ; RV64ZVE32F-NEXT: .LBB78_6: # %else5 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB78_13 @@ -9194,18 +9194,18 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB78_10 ; RV64ZVE32F-NEXT: .LBB78_9: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 
8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5 ; RV64ZVE32F-NEXT: .LBB78_10: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB78_15 @@ -9216,27 +9216,27 @@ ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB78_13: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB78_8 ; RV64ZVE32F-NEXT: .LBB78_14: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB78_9 @@ -9246,22 +9246,22 @@ ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB78_12 ; RV64ZVE32F-NEXT: .LBB78_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v8 ; RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 ; RV64ZVE32F-NEXT: flw ft0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v8, ft0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret @@ -9283,7 +9283,7 @@ ; ; RV64V-LABEL: mgather_baseidx_zext_v8i16_v8f32: ; RV64V: # %bb.0: -; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64V-NEXT: vzext.vf4 v12, v8 ; RV64V-NEXT: vsll.vi v12, v12, 2 ; RV64V-NEXT: vsetvli zero, zero, e32, m2, ta, mu @@ -9294,37 +9294,37 @@ ; RV64ZVE32F-LABEL: mgather_baseidx_zext_v8i16_v8f32: ; RV64ZVE32F: # %bb.0: ; RV64ZVE32F-NEXT: lui a1, 16 -; 
RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: addiw a1, a1, -1 ; RV64ZVE32F-NEXT: beqz a3, .LBB79_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v8 ; RV64ZVE32F-NEXT: and a3, a3, a1 ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 ; RV64ZVE32F-NEXT: flw ft0, 0(a3) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 ; RV64ZVE32F-NEXT: .LBB79_2: # %else ; RV64ZVE32F-NEXT: andi a3, a2, 2 ; RV64ZVE32F-NEXT: beqz a3, .LBB79_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v9 ; RV64ZVE32F-NEXT: and a3, a3, a1 ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 ; RV64ZVE32F-NEXT: flw ft0, 0(a3) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 1 ; RV64ZVE32F-NEXT: .LBB79_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a3, a2, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: beqz a3, .LBB79_6 @@ -9334,12 +9334,12 @@ ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 ; RV64ZVE32F-NEXT: flw ft0, 0(a3) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 2 ; RV64ZVE32F-NEXT: .LBB79_6: # %else5 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; RV64ZVE32F-NEXT: andi a3, a2, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a3, .LBB79_13 @@ -9350,19 +9350,19 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 32 ; RV64ZVE32F-NEXT: beqz a3, .LBB79_10 ; RV64ZVE32F-NEXT: .LBB79_9: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v9 ; RV64ZVE32F-NEXT: and a3, a3, a1 ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 ; RV64ZVE32F-NEXT: flw ft0, 0(a3) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 5 ; RV64ZVE32F-NEXT: .LBB79_10: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a3, a2, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a3, .LBB79_15 @@ -9373,29 +9373,29 @@ ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret ; 
RV64ZVE32F-NEXT: .LBB79_13: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v9 ; RV64ZVE32F-NEXT: and a3, a3, a1 ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 ; RV64ZVE32F-NEXT: flw ft0, 0(a3) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 3 ; RV64ZVE32F-NEXT: andi a3, a2, 16 ; RV64ZVE32F-NEXT: beqz a3, .LBB79_8 ; RV64ZVE32F-NEXT: .LBB79_14: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v8 ; RV64ZVE32F-NEXT: and a3, a3, a1 ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 ; RV64ZVE32F-NEXT: flw ft0, 0(a3) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4 ; RV64ZVE32F-NEXT: andi a3, a2, 32 ; RV64ZVE32F-NEXT: bnez a3, .LBB79_9 @@ -9406,23 +9406,23 @@ ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 ; RV64ZVE32F-NEXT: flw ft0, 0(a3) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 ; RV64ZVE32F-NEXT: andi a2, a2, -128 ; RV64ZVE32F-NEXT: beqz a2, .LBB79_12 ; RV64ZVE32F-NEXT: .LBB79_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: and a1, a2, a1 ; RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 ; RV64ZVE32F-NEXT: flw ft0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v8, ft0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret @@ -9443,7 +9443,7 @@ ; ; RV64V-LABEL: mgather_baseidx_v8f32: ; RV64V: # %bb.0: -; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64V-NEXT: vsext.vf2 v12, v8 ; RV64V-NEXT: vsll.vi v12, v12, 2 ; RV64V-NEXT: vsetvli zero, zero, e32, m2, ta, mu @@ -9453,35 +9453,35 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_v8f32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB80_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: 
vsetivli zero, 8, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vfmv.s.f v10, ft0 ; RV64ZVE32F-NEXT: .LBB80_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB80_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v12 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 1 ; RV64ZVE32F-NEXT: .LBB80_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB80_12 @@ -9495,17 +9495,17 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB80_9 ; RV64ZVE32F-NEXT: .LBB80_8: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v12, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) ; RV64ZVE32F-NEXT: vfmv.s.f v8, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5 ; RV64ZVE32F-NEXT: .LBB80_9: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v12, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB80_15 @@ -9521,31 +9521,31 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) ; RV64ZVE32F-NEXT: vfmv.s.f v14, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 2 ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: beqz a2, .LBB80_6 ; RV64ZVE32F-NEXT: .LBB80_13: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) ; RV64ZVE32F-NEXT: vfmv.s.f v8, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB80_7 ; RV64ZVE32F-NEXT: .LBB80_14: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v12 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vfmv.s.f v8, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; RV64ZVE32F-NEXT: 
vslideup.vi v10, v8, 4 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB80_8 @@ -9556,19 +9556,19 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: flw ft0, 0(a2) ; RV64ZVE32F-NEXT: vfmv.s.f v12, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 7, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 6 ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB80_11 ; RV64ZVE32F-NEXT: .LBB80_16: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v8 ; RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 ; RV64ZVE32F-NEXT: flw ft0, 0(a0) ; RV64ZVE32F-NEXT: vfmv.s.f v8, ft0 -; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e32, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 7 ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret @@ -9596,14 +9596,14 @@ ; ; RV32ZVE32F-LABEL: mgather_v1f64: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.v.i v9, 0 ; RV32ZVE32F-NEXT: vmerge.vim v9, v9, 1, v0 ; RV32ZVE32F-NEXT: vmv.x.s a0, v9 ; RV32ZVE32F-NEXT: andi a0, a0, 1 ; RV32ZVE32F-NEXT: beqz a0, .LBB81_2 ; RV32ZVE32F-NEXT: # %bb.1: # %cond.load -; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a0, v8 ; RV32ZVE32F-NEXT: fld fa0, 0(a0) ; RV32ZVE32F-NEXT: .LBB81_2: # %else @@ -9611,7 +9611,7 @@ ; ; RV64ZVE32F-LABEL: mgather_v1f64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.v.i v8, 0 ; RV64ZVE32F-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64ZVE32F-NEXT: vmv.x.s a1, v8 @@ -9644,7 +9644,7 @@ ; ; RV32ZVE32F-LABEL: mgather_v2f64: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a0, v0 ; RV32ZVE32F-NEXT: andi a1, a0, 1 ; RV32ZVE32F-NEXT: bnez a1, .LBB82_3 @@ -9654,13 +9654,13 @@ ; RV32ZVE32F-NEXT: .LBB82_2: # %else2 ; RV32ZVE32F-NEXT: ret ; RV32ZVE32F-NEXT: .LBB82_3: # %cond.load -; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: fld fa0, 0(a1) ; RV32ZVE32F-NEXT: andi a0, a0, 2 ; RV32ZVE32F-NEXT: beqz a0, .LBB82_2 ; RV32ZVE32F-NEXT: .LBB82_4: # %cond.load1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a0, v8 ; RV32ZVE32F-NEXT: fld fa1, 0(a0) @@ -9668,7 +9668,7 @@ ; ; RV64ZVE32F-LABEL: mgather_v2f64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: bnez a3, .LBB82_3 @@ -9707,7 +9707,7 @@ ; ; RV32ZVE32F-LABEL: mgather_v4f64: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a1, v0 ; RV32ZVE32F-NEXT: andi a2, a1, 1 ; RV32ZVE32F-NEXT: bnez a2, .LBB83_6 @@ -9721,7 +9721,7 @@ ; RV32ZVE32F-NEXT: andi a1, a1, 8 ; 
RV32ZVE32F-NEXT: beqz a1, .LBB83_5 ; RV32ZVE32F-NEXT: .LBB83_4: # %cond.load7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: fld fa3, 0(a1) @@ -9732,20 +9732,20 @@ ; RV32ZVE32F-NEXT: fsd fa3, 24(a0) ; RV32ZVE32F-NEXT: ret ; RV32ZVE32F-NEXT: .LBB83_6: # %cond.load -; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a2, v8 ; RV32ZVE32F-NEXT: fld fa0, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 2 ; RV32ZVE32F-NEXT: beqz a2, .LBB83_2 ; RV32ZVE32F-NEXT: .LBB83_7: # %cond.load1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a2, v9 ; RV32ZVE32F-NEXT: fld fa1, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 4 ; RV32ZVE32F-NEXT: beqz a2, .LBB83_3 ; RV32ZVE32F-NEXT: .LBB83_8: # %cond.load4 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a2, v9 ; RV32ZVE32F-NEXT: fld fa2, 0(a2) @@ -9755,7 +9755,7 @@ ; ; RV64ZVE32F-LABEL: mgather_v4f64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: bnez a3, .LBB83_6 @@ -9800,20 +9800,20 @@ define <4 x double> @mgather_truemask_v4f64(<4 x double*> %ptrs, <4 x double> %passthru) { ; RV32V-LABEL: mgather_truemask_v4f64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32V-NEXT: vluxei32.v v10, (zero), v8 ; RV32V-NEXT: vmv.v.v v8, v10 ; RV32V-NEXT: ret ; ; RV64V-LABEL: mgather_truemask_v4f64: ; RV64V: # %bb.0: -; RV64V-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64V-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64V-NEXT: vluxei64.v v8, (zero), v8 ; RV64V-NEXT: ret ; ; RV32ZVE32F-LABEL: mgather_truemask_v4f64: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV32ZVE32F-NEXT: vmset.m v9 ; RV32ZVE32F-NEXT: vmv.x.s a1, v9 ; RV32ZVE32F-NEXT: beqz zero, .LBB84_6 @@ -9827,7 +9827,7 @@ ; RV32ZVE32F-NEXT: andi a1, a1, 8 ; RV32ZVE32F-NEXT: beqz a1, .LBB84_5 ; RV32ZVE32F-NEXT: .LBB84_4: # %cond.load7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: fld fa3, 0(a1) @@ -9838,20 +9838,20 @@ ; RV32ZVE32F-NEXT: fsd fa3, 24(a0) ; RV32ZVE32F-NEXT: ret ; RV32ZVE32F-NEXT: .LBB84_6: # %cond.load -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a2, v8 ; RV32ZVE32F-NEXT: fld fa0, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 2 ; RV32ZVE32F-NEXT: beqz a2, .LBB84_2 ; RV32ZVE32F-NEXT: .LBB84_7: # %cond.load1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a2, v9 ; RV32ZVE32F-NEXT: fld fa1, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 4 ; RV32ZVE32F-NEXT: beqz a2, .LBB84_3 ; RV32ZVE32F-NEXT: .LBB84_8: # %cond.load4 -; RV32ZVE32F-NEXT: vsetivli zero, 
1, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a2, v9 ; RV32ZVE32F-NEXT: fld fa2, 0(a2) @@ -9861,7 +9861,7 @@ ; ; RV64ZVE32F-LABEL: mgather_truemask_v4f64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vmset.m v8 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: beqz zero, .LBB84_6 @@ -9954,7 +9954,7 @@ ; ; RV32ZVE32F-LABEL: mgather_v8f64: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a1, v0 ; RV32ZVE32F-NEXT: andi a2, a1, 1 ; RV32ZVE32F-NEXT: bnez a2, .LBB86_10 @@ -9980,7 +9980,7 @@ ; RV32ZVE32F-NEXT: andi a1, a1, -128 ; RV32ZVE32F-NEXT: beqz a1, .LBB86_9 ; RV32ZVE32F-NEXT: .LBB86_8: # %cond.load19 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: fld fa7, 0(a1) @@ -9995,48 +9995,48 @@ ; RV32ZVE32F-NEXT: fsd fa7, 56(a0) ; RV32ZVE32F-NEXT: ret ; RV32ZVE32F-NEXT: .LBB86_10: # %cond.load -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a2, v8 ; RV32ZVE32F-NEXT: fld fa0, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 2 ; RV32ZVE32F-NEXT: beqz a2, .LBB86_2 ; RV32ZVE32F-NEXT: .LBB86_11: # %cond.load1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa1, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 4 ; RV32ZVE32F-NEXT: beqz a2, .LBB86_3 ; RV32ZVE32F-NEXT: .LBB86_12: # %cond.load4 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa2, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 8 ; RV32ZVE32F-NEXT: beqz a2, .LBB86_4 ; RV32ZVE32F-NEXT: .LBB86_13: # %cond.load7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa3, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 16 ; RV32ZVE32F-NEXT: beqz a2, .LBB86_5 ; RV32ZVE32F-NEXT: .LBB86_14: # %cond.load10 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa4, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 32 ; RV32ZVE32F-NEXT: beqz a2, .LBB86_6 ; RV32ZVE32F-NEXT: .LBB86_15: # %cond.load13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa5, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 64 ; RV32ZVE32F-NEXT: beqz a2, .LBB86_7 ; RV32ZVE32F-NEXT: .LBB86_16: # %cond.load16 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa6, 0(a2) @@ -10046,7 +10046,7 @@ ; ; RV64ZVE32F-LABEL: mgather_v8f64: ; RV64ZVE32F: # %bb.0: -; 
RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: bnez a3, .LBB86_10 @@ -10127,7 +10127,7 @@ define <8 x double> @mgather_baseidx_v8i8_v8f64(double* %base, <8 x i8> %idxs, <8 x i1> %m, <8 x double> %passthru) { ; RV32V-LABEL: mgather_baseidx_v8i8_v8f64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vsext.vf4 v10, v8 ; RV32V-NEXT: vsll.vi v8, v10, 3 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu @@ -10146,11 +10146,11 @@ ; ; RV32ZVE32F-LABEL: mgather_baseidx_v8i8_v8f64: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vsext.vf4 v10, v8 ; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a1, v0 ; RV32ZVE32F-NEXT: andi a2, a1, 1 ; RV32ZVE32F-NEXT: bnez a2, .LBB87_10 @@ -10176,7 +10176,7 @@ ; RV32ZVE32F-NEXT: andi a1, a1, -128 ; RV32ZVE32F-NEXT: beqz a1, .LBB87_9 ; RV32ZVE32F-NEXT: .LBB87_8: # %cond.load19 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: fld fa7, 0(a1) @@ -10191,48 +10191,48 @@ ; RV32ZVE32F-NEXT: fsd fa7, 56(a0) ; RV32ZVE32F-NEXT: ret ; RV32ZVE32F-NEXT: .LBB87_10: # %cond.load -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a2, v8 ; RV32ZVE32F-NEXT: fld fa0, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 2 ; RV32ZVE32F-NEXT: beqz a2, .LBB87_2 ; RV32ZVE32F-NEXT: .LBB87_11: # %cond.load1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa1, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 4 ; RV32ZVE32F-NEXT: beqz a2, .LBB87_3 ; RV32ZVE32F-NEXT: .LBB87_12: # %cond.load4 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa2, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 8 ; RV32ZVE32F-NEXT: beqz a2, .LBB87_4 ; RV32ZVE32F-NEXT: .LBB87_13: # %cond.load7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa3, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 16 ; RV32ZVE32F-NEXT: beqz a2, .LBB87_5 ; RV32ZVE32F-NEXT: .LBB87_14: # %cond.load10 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa4, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 32 ; RV32ZVE32F-NEXT: beqz a2, .LBB87_6 ; RV32ZVE32F-NEXT: .LBB87_15: # %cond.load13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa5, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 64 ; 
RV32ZVE32F-NEXT: beqz a2, .LBB87_7 ; RV32ZVE32F-NEXT: .LBB87_16: # %cond.load16 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa6, 0(a2) @@ -10242,7 +10242,7 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_v8i8_v8f64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: beqz a3, .LBB87_2 @@ -10255,14 +10255,14 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 2 ; RV64ZVE32F-NEXT: beqz a3, .LBB87_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v9 ; RV64ZVE32F-NEXT: slli a3, a3, 3 ; RV64ZVE32F-NEXT: add a3, a1, a3 ; RV64ZVE32F-NEXT: fld fa1, 0(a3) ; RV64ZVE32F-NEXT: .LBB87_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a3, a2, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: beqz a3, .LBB87_6 @@ -10272,7 +10272,7 @@ ; RV64ZVE32F-NEXT: add a3, a1, a3 ; RV64ZVE32F-NEXT: fld fa2, 0(a3) ; RV64ZVE32F-NEXT: .LBB87_6: # %else5 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a3, a2, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a3, .LBB87_15 @@ -10283,14 +10283,14 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 32 ; RV64ZVE32F-NEXT: beqz a3, .LBB87_10 ; RV64ZVE32F-NEXT: .LBB87_9: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v9 ; RV64ZVE32F-NEXT: slli a3, a3, 3 ; RV64ZVE32F-NEXT: add a3, a1, a3 ; RV64ZVE32F-NEXT: fld fa5, 0(a3) ; RV64ZVE32F-NEXT: .LBB87_10: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a3, a2, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: beqz a3, .LBB87_12 @@ -10303,7 +10303,7 @@ ; RV64ZVE32F-NEXT: andi a2, a2, -128 ; RV64ZVE32F-NEXT: beqz a2, .LBB87_14 ; RV64ZVE32F-NEXT: # %bb.13: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 3 @@ -10320,7 +10320,7 @@ ; RV64ZVE32F-NEXT: fsd fa7, 56(a0) ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB87_15: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v9 ; RV64ZVE32F-NEXT: slli a3, a3, 3 @@ -10329,7 +10329,7 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 16 ; RV64ZVE32F-NEXT: beqz a3, .LBB87_8 ; RV64ZVE32F-NEXT: .LBB87_16: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v8 ; RV64ZVE32F-NEXT: slli a3, a3, 3 ; RV64ZVE32F-NEXT: add a3, a1, a3 @@ -10345,7 +10345,7 @@ define <8 x double> @mgather_baseidx_sext_v8i8_v8f64(double* %base, <8 x i8> %idxs, <8 x i1> %m, <8 x double> %passthru) { ; 
RV32V-LABEL: mgather_baseidx_sext_v8i8_v8f64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vsext.vf4 v10, v8 ; RV32V-NEXT: vsll.vi v8, v10, 3 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu @@ -10364,11 +10364,11 @@ ; ; RV32ZVE32F-LABEL: mgather_baseidx_sext_v8i8_v8f64: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vsext.vf4 v10, v8 ; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a1, v0 ; RV32ZVE32F-NEXT: andi a2, a1, 1 ; RV32ZVE32F-NEXT: bnez a2, .LBB88_10 @@ -10394,7 +10394,7 @@ ; RV32ZVE32F-NEXT: andi a1, a1, -128 ; RV32ZVE32F-NEXT: beqz a1, .LBB88_9 ; RV32ZVE32F-NEXT: .LBB88_8: # %cond.load19 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: fld fa7, 0(a1) @@ -10409,48 +10409,48 @@ ; RV32ZVE32F-NEXT: fsd fa7, 56(a0) ; RV32ZVE32F-NEXT: ret ; RV32ZVE32F-NEXT: .LBB88_10: # %cond.load -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a2, v8 ; RV32ZVE32F-NEXT: fld fa0, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 2 ; RV32ZVE32F-NEXT: beqz a2, .LBB88_2 ; RV32ZVE32F-NEXT: .LBB88_11: # %cond.load1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa1, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 4 ; RV32ZVE32F-NEXT: beqz a2, .LBB88_3 ; RV32ZVE32F-NEXT: .LBB88_12: # %cond.load4 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa2, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 8 ; RV32ZVE32F-NEXT: beqz a2, .LBB88_4 ; RV32ZVE32F-NEXT: .LBB88_13: # %cond.load7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa3, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 16 ; RV32ZVE32F-NEXT: beqz a2, .LBB88_5 ; RV32ZVE32F-NEXT: .LBB88_14: # %cond.load10 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa4, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 32 ; RV32ZVE32F-NEXT: beqz a2, .LBB88_6 ; RV32ZVE32F-NEXT: .LBB88_15: # %cond.load13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa5, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 64 ; RV32ZVE32F-NEXT: beqz a2, .LBB88_7 ; RV32ZVE32F-NEXT: .LBB88_16: # %cond.load16 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa6, 0(a2) @@ -10460,7 +10460,7 @@ ; ; RV64ZVE32F-LABEL: 
mgather_baseidx_sext_v8i8_v8f64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: beqz a3, .LBB88_2 @@ -10473,14 +10473,14 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 2 ; RV64ZVE32F-NEXT: beqz a3, .LBB88_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v9 ; RV64ZVE32F-NEXT: slli a3, a3, 3 ; RV64ZVE32F-NEXT: add a3, a1, a3 ; RV64ZVE32F-NEXT: fld fa1, 0(a3) ; RV64ZVE32F-NEXT: .LBB88_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a3, a2, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: beqz a3, .LBB88_6 @@ -10490,7 +10490,7 @@ ; RV64ZVE32F-NEXT: add a3, a1, a3 ; RV64ZVE32F-NEXT: fld fa2, 0(a3) ; RV64ZVE32F-NEXT: .LBB88_6: # %else5 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a3, a2, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a3, .LBB88_15 @@ -10501,14 +10501,14 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 32 ; RV64ZVE32F-NEXT: beqz a3, .LBB88_10 ; RV64ZVE32F-NEXT: .LBB88_9: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v9 ; RV64ZVE32F-NEXT: slli a3, a3, 3 ; RV64ZVE32F-NEXT: add a3, a1, a3 ; RV64ZVE32F-NEXT: fld fa5, 0(a3) ; RV64ZVE32F-NEXT: .LBB88_10: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a3, a2, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: beqz a3, .LBB88_12 @@ -10521,7 +10521,7 @@ ; RV64ZVE32F-NEXT: andi a2, a2, -128 ; RV64ZVE32F-NEXT: beqz a2, .LBB88_14 ; RV64ZVE32F-NEXT: # %bb.13: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 3 @@ -10538,7 +10538,7 @@ ; RV64ZVE32F-NEXT: fsd fa7, 56(a0) ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB88_15: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v9 ; RV64ZVE32F-NEXT: slli a3, a3, 3 @@ -10547,7 +10547,7 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 16 ; RV64ZVE32F-NEXT: beqz a3, .LBB88_8 ; RV64ZVE32F-NEXT: .LBB88_16: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v8 ; RV64ZVE32F-NEXT: slli a3, a3, 3 ; RV64ZVE32F-NEXT: add a3, a1, a3 @@ -10564,7 +10564,7 @@ define <8 x double> @mgather_baseidx_zext_v8i8_v8f64(double* %base, <8 x i8> %idxs, <8 x i1> %m, <8 x double> %passthru) { ; RV32V-LABEL: mgather_baseidx_zext_v8i8_v8f64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vzext.vf4 v10, v8 ; RV32V-NEXT: vsll.vi v8, v10, 3 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu @@ -10583,11 +10583,11 @@ ; ; RV32ZVE32F-LABEL: 
mgather_baseidx_zext_v8i8_v8f64: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vzext.vf4 v10, v8 ; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a1, v0 ; RV32ZVE32F-NEXT: andi a2, a1, 1 ; RV32ZVE32F-NEXT: bnez a2, .LBB89_10 @@ -10613,7 +10613,7 @@ ; RV32ZVE32F-NEXT: andi a1, a1, -128 ; RV32ZVE32F-NEXT: beqz a1, .LBB89_9 ; RV32ZVE32F-NEXT: .LBB89_8: # %cond.load19 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: fld fa7, 0(a1) @@ -10628,48 +10628,48 @@ ; RV32ZVE32F-NEXT: fsd fa7, 56(a0) ; RV32ZVE32F-NEXT: ret ; RV32ZVE32F-NEXT: .LBB89_10: # %cond.load -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a2, v8 ; RV32ZVE32F-NEXT: fld fa0, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 2 ; RV32ZVE32F-NEXT: beqz a2, .LBB89_2 ; RV32ZVE32F-NEXT: .LBB89_11: # %cond.load1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa1, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 4 ; RV32ZVE32F-NEXT: beqz a2, .LBB89_3 ; RV32ZVE32F-NEXT: .LBB89_12: # %cond.load4 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa2, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 8 ; RV32ZVE32F-NEXT: beqz a2, .LBB89_4 ; RV32ZVE32F-NEXT: .LBB89_13: # %cond.load7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa3, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 16 ; RV32ZVE32F-NEXT: beqz a2, .LBB89_5 ; RV32ZVE32F-NEXT: .LBB89_14: # %cond.load10 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa4, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 32 ; RV32ZVE32F-NEXT: beqz a2, .LBB89_6 ; RV32ZVE32F-NEXT: .LBB89_15: # %cond.load13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa5, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 64 ; RV32ZVE32F-NEXT: beqz a2, .LBB89_7 ; RV32ZVE32F-NEXT: .LBB89_16: # %cond.load16 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa6, 0(a2) @@ -10679,7 +10679,7 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_zext_v8i8_v8f64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: beqz a3, .LBB89_2 @@ -10693,7 +10693,7 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 2 ; 
RV64ZVE32F-NEXT: beqz a3, .LBB89_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v9 ; RV64ZVE32F-NEXT: andi a3, a3, 255 @@ -10701,7 +10701,7 @@ ; RV64ZVE32F-NEXT: add a3, a1, a3 ; RV64ZVE32F-NEXT: fld fa1, 0(a3) ; RV64ZVE32F-NEXT: .LBB89_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a3, a2, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: beqz a3, .LBB89_6 @@ -10712,7 +10712,7 @@ ; RV64ZVE32F-NEXT: add a3, a1, a3 ; RV64ZVE32F-NEXT: fld fa2, 0(a3) ; RV64ZVE32F-NEXT: .LBB89_6: # %else5 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a3, a2, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a3, .LBB89_15 @@ -10723,7 +10723,7 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 32 ; RV64ZVE32F-NEXT: beqz a3, .LBB89_10 ; RV64ZVE32F-NEXT: .LBB89_9: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v9 ; RV64ZVE32F-NEXT: andi a3, a3, 255 @@ -10731,7 +10731,7 @@ ; RV64ZVE32F-NEXT: add a3, a1, a3 ; RV64ZVE32F-NEXT: fld fa5, 0(a3) ; RV64ZVE32F-NEXT: .LBB89_10: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a3, a2, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: beqz a3, .LBB89_12 @@ -10745,7 +10745,7 @@ ; RV64ZVE32F-NEXT: andi a2, a2, -128 ; RV64ZVE32F-NEXT: beqz a2, .LBB89_14 ; RV64ZVE32F-NEXT: # %bb.13: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: andi a2, a2, 255 @@ -10763,7 +10763,7 @@ ; RV64ZVE32F-NEXT: fsd fa7, 56(a0) ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB89_15: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v9 ; RV64ZVE32F-NEXT: andi a3, a3, 255 @@ -10773,7 +10773,7 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 16 ; RV64ZVE32F-NEXT: beqz a3, .LBB89_8 ; RV64ZVE32F-NEXT: .LBB89_16: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v8 ; RV64ZVE32F-NEXT: andi a3, a3, 255 ; RV64ZVE32F-NEXT: slli a3, a3, 3 @@ -10791,7 +10791,7 @@ define <8 x double> @mgather_baseidx_v8i16_v8f64(double* %base, <8 x i16> %idxs, <8 x i1> %m, <8 x double> %passthru) { ; RV32V-LABEL: mgather_baseidx_v8i16_v8f64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vsext.vf2 v10, v8 ; RV32V-NEXT: vsll.vi v8, v10, 3 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu @@ -10810,11 +10810,11 @@ ; ; RV32ZVE32F-LABEL: mgather_baseidx_v8i16_v8f64: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vsext.vf2 v10, v8 ; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1 -; RV32ZVE32F-NEXT: vsetivli 
zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a1, v0 ; RV32ZVE32F-NEXT: andi a2, a1, 1 ; RV32ZVE32F-NEXT: bnez a2, .LBB90_10 @@ -10840,7 +10840,7 @@ ; RV32ZVE32F-NEXT: andi a1, a1, -128 ; RV32ZVE32F-NEXT: beqz a1, .LBB90_9 ; RV32ZVE32F-NEXT: .LBB90_8: # %cond.load19 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: fld fa7, 0(a1) @@ -10855,48 +10855,48 @@ ; RV32ZVE32F-NEXT: fsd fa7, 56(a0) ; RV32ZVE32F-NEXT: ret ; RV32ZVE32F-NEXT: .LBB90_10: # %cond.load -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a2, v8 ; RV32ZVE32F-NEXT: fld fa0, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 2 ; RV32ZVE32F-NEXT: beqz a2, .LBB90_2 ; RV32ZVE32F-NEXT: .LBB90_11: # %cond.load1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa1, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 4 ; RV32ZVE32F-NEXT: beqz a2, .LBB90_3 ; RV32ZVE32F-NEXT: .LBB90_12: # %cond.load4 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa2, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 8 ; RV32ZVE32F-NEXT: beqz a2, .LBB90_4 ; RV32ZVE32F-NEXT: .LBB90_13: # %cond.load7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa3, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 16 ; RV32ZVE32F-NEXT: beqz a2, .LBB90_5 ; RV32ZVE32F-NEXT: .LBB90_14: # %cond.load10 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa4, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 32 ; RV32ZVE32F-NEXT: beqz a2, .LBB90_6 ; RV32ZVE32F-NEXT: .LBB90_15: # %cond.load13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa5, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 64 ; RV32ZVE32F-NEXT: beqz a2, .LBB90_7 ; RV32ZVE32F-NEXT: .LBB90_16: # %cond.load16 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa6, 0(a2) @@ -10906,12 +10906,12 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_v8i16_v8f64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: beqz a3, .LBB90_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v8 ; RV64ZVE32F-NEXT: slli a3, a3, 3 ; RV64ZVE32F-NEXT: add a3, a1, a3 @@ -10920,14 +10920,14 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 2 ; RV64ZVE32F-NEXT: beqz a3, .LBB90_4 ; 
RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v9 ; RV64ZVE32F-NEXT: slli a3, a3, 3 ; RV64ZVE32F-NEXT: add a3, a1, a3 ; RV64ZVE32F-NEXT: fld fa1, 0(a3) ; RV64ZVE32F-NEXT: .LBB90_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a3, a2, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: beqz a3, .LBB90_6 @@ -10937,7 +10937,7 @@ ; RV64ZVE32F-NEXT: add a3, a1, a3 ; RV64ZVE32F-NEXT: fld fa2, 0(a3) ; RV64ZVE32F-NEXT: .LBB90_6: # %else5 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; RV64ZVE32F-NEXT: andi a3, a2, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a3, .LBB90_15 @@ -10948,14 +10948,14 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 32 ; RV64ZVE32F-NEXT: beqz a3, .LBB90_10 ; RV64ZVE32F-NEXT: .LBB90_9: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v9 ; RV64ZVE32F-NEXT: slli a3, a3, 3 ; RV64ZVE32F-NEXT: add a3, a1, a3 ; RV64ZVE32F-NEXT: fld fa5, 0(a3) ; RV64ZVE32F-NEXT: .LBB90_10: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a3, a2, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: beqz a3, .LBB90_12 @@ -10968,7 +10968,7 @@ ; RV64ZVE32F-NEXT: andi a2, a2, -128 ; RV64ZVE32F-NEXT: beqz a2, .LBB90_14 ; RV64ZVE32F-NEXT: # %bb.13: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 3 @@ -10985,7 +10985,7 @@ ; RV64ZVE32F-NEXT: fsd fa7, 56(a0) ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB90_15: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v9 ; RV64ZVE32F-NEXT: slli a3, a3, 3 @@ -10994,7 +10994,7 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 16 ; RV64ZVE32F-NEXT: beqz a3, .LBB90_8 ; RV64ZVE32F-NEXT: .LBB90_16: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v8 ; RV64ZVE32F-NEXT: slli a3, a3, 3 ; RV64ZVE32F-NEXT: add a3, a1, a3 @@ -11010,7 +11010,7 @@ define <8 x double> @mgather_baseidx_sext_v8i16_v8f64(double* %base, <8 x i16> %idxs, <8 x i1> %m, <8 x double> %passthru) { ; RV32V-LABEL: mgather_baseidx_sext_v8i16_v8f64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vsext.vf2 v10, v8 ; RV32V-NEXT: vsll.vi v8, v10, 3 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu @@ -11029,11 +11029,11 @@ ; ; RV32ZVE32F-LABEL: mgather_baseidx_sext_v8i16_v8f64: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vsext.vf2 v10, v8 ; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, 
e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a1, v0 ; RV32ZVE32F-NEXT: andi a2, a1, 1 ; RV32ZVE32F-NEXT: bnez a2, .LBB91_10 @@ -11059,7 +11059,7 @@ ; RV32ZVE32F-NEXT: andi a1, a1, -128 ; RV32ZVE32F-NEXT: beqz a1, .LBB91_9 ; RV32ZVE32F-NEXT: .LBB91_8: # %cond.load19 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: fld fa7, 0(a1) @@ -11074,48 +11074,48 @@ ; RV32ZVE32F-NEXT: fsd fa7, 56(a0) ; RV32ZVE32F-NEXT: ret ; RV32ZVE32F-NEXT: .LBB91_10: # %cond.load -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a2, v8 ; RV32ZVE32F-NEXT: fld fa0, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 2 ; RV32ZVE32F-NEXT: beqz a2, .LBB91_2 ; RV32ZVE32F-NEXT: .LBB91_11: # %cond.load1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa1, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 4 ; RV32ZVE32F-NEXT: beqz a2, .LBB91_3 ; RV32ZVE32F-NEXT: .LBB91_12: # %cond.load4 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa2, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 8 ; RV32ZVE32F-NEXT: beqz a2, .LBB91_4 ; RV32ZVE32F-NEXT: .LBB91_13: # %cond.load7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa3, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 16 ; RV32ZVE32F-NEXT: beqz a2, .LBB91_5 ; RV32ZVE32F-NEXT: .LBB91_14: # %cond.load10 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa4, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 32 ; RV32ZVE32F-NEXT: beqz a2, .LBB91_6 ; RV32ZVE32F-NEXT: .LBB91_15: # %cond.load13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa5, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 64 ; RV32ZVE32F-NEXT: beqz a2, .LBB91_7 ; RV32ZVE32F-NEXT: .LBB91_16: # %cond.load16 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa6, 0(a2) @@ -11125,12 +11125,12 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_sext_v8i16_v8f64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: beqz a3, .LBB91_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v8 ; RV64ZVE32F-NEXT: slli a3, a3, 3 ; RV64ZVE32F-NEXT: add a3, a1, a3 @@ -11139,14 +11139,14 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 2 ; RV64ZVE32F-NEXT: beqz a3, .LBB91_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli 
zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v9 ; RV64ZVE32F-NEXT: slli a3, a3, 3 ; RV64ZVE32F-NEXT: add a3, a1, a3 ; RV64ZVE32F-NEXT: fld fa1, 0(a3) ; RV64ZVE32F-NEXT: .LBB91_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a3, a2, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: beqz a3, .LBB91_6 @@ -11156,7 +11156,7 @@ ; RV64ZVE32F-NEXT: add a3, a1, a3 ; RV64ZVE32F-NEXT: fld fa2, 0(a3) ; RV64ZVE32F-NEXT: .LBB91_6: # %else5 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; RV64ZVE32F-NEXT: andi a3, a2, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a3, .LBB91_15 @@ -11167,14 +11167,14 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 32 ; RV64ZVE32F-NEXT: beqz a3, .LBB91_10 ; RV64ZVE32F-NEXT: .LBB91_9: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v9 ; RV64ZVE32F-NEXT: slli a3, a3, 3 ; RV64ZVE32F-NEXT: add a3, a1, a3 ; RV64ZVE32F-NEXT: fld fa5, 0(a3) ; RV64ZVE32F-NEXT: .LBB91_10: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a3, a2, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: beqz a3, .LBB91_12 @@ -11187,7 +11187,7 @@ ; RV64ZVE32F-NEXT: andi a2, a2, -128 ; RV64ZVE32F-NEXT: beqz a2, .LBB91_14 ; RV64ZVE32F-NEXT: # %bb.13: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 3 @@ -11204,7 +11204,7 @@ ; RV64ZVE32F-NEXT: fsd fa7, 56(a0) ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB91_15: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v9 ; RV64ZVE32F-NEXT: slli a3, a3, 3 @@ -11213,7 +11213,7 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 16 ; RV64ZVE32F-NEXT: beqz a3, .LBB91_8 ; RV64ZVE32F-NEXT: .LBB91_16: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v8 ; RV64ZVE32F-NEXT: slli a3, a3, 3 ; RV64ZVE32F-NEXT: add a3, a1, a3 @@ -11230,7 +11230,7 @@ define <8 x double> @mgather_baseidx_zext_v8i16_v8f64(double* %base, <8 x i16> %idxs, <8 x i1> %m, <8 x double> %passthru) { ; RV32V-LABEL: mgather_baseidx_zext_v8i16_v8f64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vzext.vf2 v10, v8 ; RV32V-NEXT: vsll.vi v8, v10, 3 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu @@ -11249,11 +11249,11 @@ ; ; RV32ZVE32F-LABEL: mgather_baseidx_zext_v8i16_v8f64: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vzext.vf2 v10, v8 ; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a1, v0 ; RV32ZVE32F-NEXT: 
andi a2, a1, 1 ; RV32ZVE32F-NEXT: bnez a2, .LBB92_10 @@ -11279,7 +11279,7 @@ ; RV32ZVE32F-NEXT: andi a1, a1, -128 ; RV32ZVE32F-NEXT: beqz a1, .LBB92_9 ; RV32ZVE32F-NEXT: .LBB92_8: # %cond.load19 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: fld fa7, 0(a1) @@ -11294,48 +11294,48 @@ ; RV32ZVE32F-NEXT: fsd fa7, 56(a0) ; RV32ZVE32F-NEXT: ret ; RV32ZVE32F-NEXT: .LBB92_10: # %cond.load -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a2, v8 ; RV32ZVE32F-NEXT: fld fa0, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 2 ; RV32ZVE32F-NEXT: beqz a2, .LBB92_2 ; RV32ZVE32F-NEXT: .LBB92_11: # %cond.load1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa1, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 4 ; RV32ZVE32F-NEXT: beqz a2, .LBB92_3 ; RV32ZVE32F-NEXT: .LBB92_12: # %cond.load4 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa2, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 8 ; RV32ZVE32F-NEXT: beqz a2, .LBB92_4 ; RV32ZVE32F-NEXT: .LBB92_13: # %cond.load7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa3, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 16 ; RV32ZVE32F-NEXT: beqz a2, .LBB92_5 ; RV32ZVE32F-NEXT: .LBB92_14: # %cond.load10 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa4, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 32 ; RV32ZVE32F-NEXT: beqz a2, .LBB92_6 ; RV32ZVE32F-NEXT: .LBB92_15: # %cond.load13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa5, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 64 ; RV32ZVE32F-NEXT: beqz a2, .LBB92_7 ; RV32ZVE32F-NEXT: .LBB92_16: # %cond.load16 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa6, 0(a2) @@ -11346,13 +11346,13 @@ ; RV64ZVE32F-LABEL: mgather_baseidx_zext_v8i16_v8f64: ; RV64ZVE32F: # %bb.0: ; RV64ZVE32F-NEXT: lui a2, 16 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v0 ; RV64ZVE32F-NEXT: andi a4, a3, 1 ; RV64ZVE32F-NEXT: addiw a2, a2, -1 ; RV64ZVE32F-NEXT: beqz a4, .LBB92_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a4, v8 ; RV64ZVE32F-NEXT: and a4, a4, a2 ; RV64ZVE32F-NEXT: slli a4, a4, 3 @@ -11362,7 +11362,7 @@ ; RV64ZVE32F-NEXT: andi a4, a3, 2 ; RV64ZVE32F-NEXT: beqz a4, .LBB92_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, 
e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a4, v9 ; RV64ZVE32F-NEXT: and a4, a4, a2 @@ -11370,7 +11370,7 @@ ; RV64ZVE32F-NEXT: add a4, a1, a4 ; RV64ZVE32F-NEXT: fld fa1, 0(a4) ; RV64ZVE32F-NEXT: .LBB92_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a4, a3, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: beqz a4, .LBB92_6 @@ -11381,7 +11381,7 @@ ; RV64ZVE32F-NEXT: add a4, a1, a4 ; RV64ZVE32F-NEXT: fld fa2, 0(a4) ; RV64ZVE32F-NEXT: .LBB92_6: # %else5 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; RV64ZVE32F-NEXT: andi a4, a3, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a4, .LBB92_15 @@ -11392,7 +11392,7 @@ ; RV64ZVE32F-NEXT: andi a4, a3, 32 ; RV64ZVE32F-NEXT: beqz a4, .LBB92_10 ; RV64ZVE32F-NEXT: .LBB92_9: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a4, v9 ; RV64ZVE32F-NEXT: and a4, a4, a2 @@ -11400,7 +11400,7 @@ ; RV64ZVE32F-NEXT: add a4, a1, a4 ; RV64ZVE32F-NEXT: fld fa5, 0(a4) ; RV64ZVE32F-NEXT: .LBB92_10: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a4, a3, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: beqz a4, .LBB92_12 @@ -11414,7 +11414,7 @@ ; RV64ZVE32F-NEXT: andi a3, a3, -128 ; RV64ZVE32F-NEXT: beqz a3, .LBB92_14 ; RV64ZVE32F-NEXT: # %bb.13: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v8 ; RV64ZVE32F-NEXT: and a2, a3, a2 @@ -11432,7 +11432,7 @@ ; RV64ZVE32F-NEXT: fsd fa7, 56(a0) ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB92_15: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a4, v9 ; RV64ZVE32F-NEXT: and a4, a4, a2 @@ -11442,7 +11442,7 @@ ; RV64ZVE32F-NEXT: andi a4, a3, 16 ; RV64ZVE32F-NEXT: beqz a4, .LBB92_8 ; RV64ZVE32F-NEXT: .LBB92_16: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a4, v8 ; RV64ZVE32F-NEXT: and a4, a4, a2 ; RV64ZVE32F-NEXT: slli a4, a4, 3 @@ -11460,7 +11460,7 @@ define <8 x double> @mgather_baseidx_v8i32_v8f64(double* %base, <8 x i32> %idxs, <8 x i1> %m, <8 x double> %passthru) { ; RV32V-LABEL: mgather_baseidx_v8i32_v8f64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vsll.vi v8, v8, 3 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32V-NEXT: vluxei32.v v12, (a0), v8, v0.t @@ -11478,10 +11478,10 @@ ; ; RV32ZVE32F-LABEL: mgather_baseidx_v8i32_v8f64: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a1, v0 ; RV32ZVE32F-NEXT: andi 
a2, a1, 1 ; RV32ZVE32F-NEXT: bnez a2, .LBB93_10 @@ -11507,7 +11507,7 @@ ; RV32ZVE32F-NEXT: andi a1, a1, -128 ; RV32ZVE32F-NEXT: beqz a1, .LBB93_9 ; RV32ZVE32F-NEXT: .LBB93_8: # %cond.load19 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: fld fa7, 0(a1) @@ -11522,48 +11522,48 @@ ; RV32ZVE32F-NEXT: fsd fa7, 56(a0) ; RV32ZVE32F-NEXT: ret ; RV32ZVE32F-NEXT: .LBB93_10: # %cond.load -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a2, v8 ; RV32ZVE32F-NEXT: fld fa0, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 2 ; RV32ZVE32F-NEXT: beqz a2, .LBB93_2 ; RV32ZVE32F-NEXT: .LBB93_11: # %cond.load1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa1, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 4 ; RV32ZVE32F-NEXT: beqz a2, .LBB93_3 ; RV32ZVE32F-NEXT: .LBB93_12: # %cond.load4 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa2, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 8 ; RV32ZVE32F-NEXT: beqz a2, .LBB93_4 ; RV32ZVE32F-NEXT: .LBB93_13: # %cond.load7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa3, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 16 ; RV32ZVE32F-NEXT: beqz a2, .LBB93_5 ; RV32ZVE32F-NEXT: .LBB93_14: # %cond.load10 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa4, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 32 ; RV32ZVE32F-NEXT: beqz a2, .LBB93_6 ; RV32ZVE32F-NEXT: .LBB93_15: # %cond.load13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa5, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 64 ; RV32ZVE32F-NEXT: beqz a2, .LBB93_7 ; RV32ZVE32F-NEXT: .LBB93_16: # %cond.load16 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa6, 0(a2) @@ -11573,12 +11573,12 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_v8i32_v8f64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: beqz a3, .LBB93_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v8 ; RV64ZVE32F-NEXT: slli a3, a3, 3 ; RV64ZVE32F-NEXT: add a3, a1, a3 @@ -11587,16 +11587,16 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 2 ; RV64ZVE32F-NEXT: beqz a3, .LBB93_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; 
RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v10 ; RV64ZVE32F-NEXT: slli a3, a3, 3 ; RV64ZVE32F-NEXT: add a3, a1, a3 ; RV64ZVE32F-NEXT: fld fa1, 0(a3) ; RV64ZVE32F-NEXT: .LBB93_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: andi a3, a2, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a3, .LBB93_14 @@ -11610,14 +11610,14 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 32 ; RV64ZVE32F-NEXT: beqz a3, .LBB93_9 ; RV64ZVE32F-NEXT: .LBB93_8: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v8 ; RV64ZVE32F-NEXT: slli a3, a3, 3 ; RV64ZVE32F-NEXT: add a3, a1, a3 ; RV64ZVE32F-NEXT: fld fa5, 0(a3) ; RV64ZVE32F-NEXT: .LBB93_9: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: andi a3, a2, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2 ; RV64ZVE32F-NEXT: beqz a3, .LBB93_11 @@ -11630,7 +11630,7 @@ ; RV64ZVE32F-NEXT: andi a2, a2, -128 ; RV64ZVE32F-NEXT: beqz a2, .LBB93_13 ; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 3 @@ -11654,7 +11654,7 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 8 ; RV64ZVE32F-NEXT: beqz a3, .LBB93_6 ; RV64ZVE32F-NEXT: .LBB93_15: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v8 ; RV64ZVE32F-NEXT: slli a3, a3, 3 @@ -11663,7 +11663,7 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 16 ; RV64ZVE32F-NEXT: beqz a3, .LBB93_7 ; RV64ZVE32F-NEXT: .LBB93_16: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v10 ; RV64ZVE32F-NEXT: slli a3, a3, 3 ; RV64ZVE32F-NEXT: add a3, a1, a3 @@ -11679,7 +11679,7 @@ define <8 x double> @mgather_baseidx_sext_v8i32_v8f64(double* %base, <8 x i32> %idxs, <8 x i1> %m, <8 x double> %passthru) { ; RV32V-LABEL: mgather_baseidx_sext_v8i32_v8f64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vsll.vi v8, v8, 3 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32V-NEXT: vluxei32.v v12, (a0), v8, v0.t @@ -11697,10 +11697,10 @@ ; ; RV32ZVE32F-LABEL: mgather_baseidx_sext_v8i32_v8f64: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a1, v0 ; RV32ZVE32F-NEXT: andi a2, a1, 1 ; RV32ZVE32F-NEXT: bnez a2, .LBB94_10 @@ -11726,7 +11726,7 @@ ; RV32ZVE32F-NEXT: andi a1, a1, -128 ; RV32ZVE32F-NEXT: beqz a1, .LBB94_9 ; RV32ZVE32F-NEXT: .LBB94_8: # %cond.load19 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; 
RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: fld fa7, 0(a1) @@ -11741,48 +11741,48 @@ ; RV32ZVE32F-NEXT: fsd fa7, 56(a0) ; RV32ZVE32F-NEXT: ret ; RV32ZVE32F-NEXT: .LBB94_10: # %cond.load -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a2, v8 ; RV32ZVE32F-NEXT: fld fa0, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 2 ; RV32ZVE32F-NEXT: beqz a2, .LBB94_2 ; RV32ZVE32F-NEXT: .LBB94_11: # %cond.load1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa1, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 4 ; RV32ZVE32F-NEXT: beqz a2, .LBB94_3 ; RV32ZVE32F-NEXT: .LBB94_12: # %cond.load4 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa2, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 8 ; RV32ZVE32F-NEXT: beqz a2, .LBB94_4 ; RV32ZVE32F-NEXT: .LBB94_13: # %cond.load7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa3, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 16 ; RV32ZVE32F-NEXT: beqz a2, .LBB94_5 ; RV32ZVE32F-NEXT: .LBB94_14: # %cond.load10 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa4, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 32 ; RV32ZVE32F-NEXT: beqz a2, .LBB94_6 ; RV32ZVE32F-NEXT: .LBB94_15: # %cond.load13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa5, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 64 ; RV32ZVE32F-NEXT: beqz a2, .LBB94_7 ; RV32ZVE32F-NEXT: .LBB94_16: # %cond.load16 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa6, 0(a2) @@ -11792,12 +11792,12 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_sext_v8i32_v8f64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: beqz a3, .LBB94_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v8 ; RV64ZVE32F-NEXT: slli a3, a3, 3 ; RV64ZVE32F-NEXT: add a3, a1, a3 @@ -11806,16 +11806,16 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 2 ; RV64ZVE32F-NEXT: beqz a3, .LBB94_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v10 ; RV64ZVE32F-NEXT: slli a3, a3, 3 ; RV64ZVE32F-NEXT: add a3, a1, a3 ; RV64ZVE32F-NEXT: fld fa1, 0(a3) ; RV64ZVE32F-NEXT: .LBB94_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, mu +; RV64ZVE32F-NEXT: 
vsetivli zero, 4, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: andi a3, a2, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a3, .LBB94_14 @@ -11829,14 +11829,14 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 32 ; RV64ZVE32F-NEXT: beqz a3, .LBB94_9 ; RV64ZVE32F-NEXT: .LBB94_8: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v8 ; RV64ZVE32F-NEXT: slli a3, a3, 3 ; RV64ZVE32F-NEXT: add a3, a1, a3 ; RV64ZVE32F-NEXT: fld fa5, 0(a3) ; RV64ZVE32F-NEXT: .LBB94_9: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: andi a3, a2, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2 ; RV64ZVE32F-NEXT: beqz a3, .LBB94_11 @@ -11849,7 +11849,7 @@ ; RV64ZVE32F-NEXT: andi a2, a2, -128 ; RV64ZVE32F-NEXT: beqz a2, .LBB94_13 ; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 3 @@ -11873,7 +11873,7 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 8 ; RV64ZVE32F-NEXT: beqz a3, .LBB94_6 ; RV64ZVE32F-NEXT: .LBB94_15: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v8 ; RV64ZVE32F-NEXT: slli a3, a3, 3 @@ -11882,7 +11882,7 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 16 ; RV64ZVE32F-NEXT: beqz a3, .LBB94_7 ; RV64ZVE32F-NEXT: .LBB94_16: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v10 ; RV64ZVE32F-NEXT: slli a3, a3, 3 ; RV64ZVE32F-NEXT: add a3, a1, a3 @@ -11899,7 +11899,7 @@ define <8 x double> @mgather_baseidx_zext_v8i32_v8f64(double* %base, <8 x i32> %idxs, <8 x i1> %m, <8 x double> %passthru) { ; RV32V-LABEL: mgather_baseidx_zext_v8i32_v8f64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vsll.vi v8, v8, 3 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32V-NEXT: vluxei32.v v12, (a0), v8, v0.t @@ -11917,10 +11917,10 @@ ; ; RV32ZVE32F-LABEL: mgather_baseidx_zext_v8i32_v8f64: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a1, v0 ; RV32ZVE32F-NEXT: andi a2, a1, 1 ; RV32ZVE32F-NEXT: bnez a2, .LBB95_10 @@ -11946,7 +11946,7 @@ ; RV32ZVE32F-NEXT: andi a1, a1, -128 ; RV32ZVE32F-NEXT: beqz a1, .LBB95_9 ; RV32ZVE32F-NEXT: .LBB95_8: # %cond.load19 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: fld fa7, 0(a1) @@ -11961,48 +11961,48 @@ ; RV32ZVE32F-NEXT: fsd fa7, 56(a0) ; RV32ZVE32F-NEXT: ret ; RV32ZVE32F-NEXT: .LBB95_10: # %cond.load -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; 
RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a2, v8 ; RV32ZVE32F-NEXT: fld fa0, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 2 ; RV32ZVE32F-NEXT: beqz a2, .LBB95_2 ; RV32ZVE32F-NEXT: .LBB95_11: # %cond.load1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa1, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 4 ; RV32ZVE32F-NEXT: beqz a2, .LBB95_3 ; RV32ZVE32F-NEXT: .LBB95_12: # %cond.load4 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa2, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 8 ; RV32ZVE32F-NEXT: beqz a2, .LBB95_4 ; RV32ZVE32F-NEXT: .LBB95_13: # %cond.load7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa3, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 16 ; RV32ZVE32F-NEXT: beqz a2, .LBB95_5 ; RV32ZVE32F-NEXT: .LBB95_14: # %cond.load10 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa4, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 32 ; RV32ZVE32F-NEXT: beqz a2, .LBB95_6 ; RV32ZVE32F-NEXT: .LBB95_15: # %cond.load13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa5, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 64 ; RV32ZVE32F-NEXT: beqz a2, .LBB95_7 ; RV32ZVE32F-NEXT: .LBB95_16: # %cond.load16 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa6, 0(a2) @@ -12012,12 +12012,12 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_zext_v8i32_v8f64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: beqz a3, .LBB95_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v8 ; RV64ZVE32F-NEXT: slli a3, a3, 32 ; RV64ZVE32F-NEXT: srli a3, a3, 29 @@ -12027,7 +12027,7 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 2 ; RV64ZVE32F-NEXT: beqz a3, .LBB95_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v10 ; RV64ZVE32F-NEXT: slli a3, a3, 32 @@ -12035,9 +12035,9 @@ ; RV64ZVE32F-NEXT: add a3, a1, a3 ; RV64ZVE32F-NEXT: fld fa1, 0(a3) ; RV64ZVE32F-NEXT: .LBB95_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: andi a3, a2, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; 
RV64ZVE32F-NEXT: bnez a3, .LBB95_14 @@ -12051,7 +12051,7 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 32 ; RV64ZVE32F-NEXT: beqz a3, .LBB95_9 ; RV64ZVE32F-NEXT: .LBB95_8: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v8 ; RV64ZVE32F-NEXT: slli a3, a3, 32 @@ -12059,7 +12059,7 @@ ; RV64ZVE32F-NEXT: add a3, a1, a3 ; RV64ZVE32F-NEXT: fld fa5, 0(a3) ; RV64ZVE32F-NEXT: .LBB95_9: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: andi a3, a2, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2 ; RV64ZVE32F-NEXT: beqz a3, .LBB95_11 @@ -12073,7 +12073,7 @@ ; RV64ZVE32F-NEXT: andi a2, a2, -128 ; RV64ZVE32F-NEXT: beqz a2, .LBB95_13 ; RV64ZVE32F-NEXT: # %bb.12: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 32 @@ -12099,7 +12099,7 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 8 ; RV64ZVE32F-NEXT: beqz a3, .LBB95_6 ; RV64ZVE32F-NEXT: .LBB95_15: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v8 ; RV64ZVE32F-NEXT: slli a3, a3, 32 @@ -12109,7 +12109,7 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 16 ; RV64ZVE32F-NEXT: beqz a3, .LBB95_7 ; RV64ZVE32F-NEXT: .LBB95_16: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v10 ; RV64ZVE32F-NEXT: slli a3, a3, 32 ; RV64ZVE32F-NEXT: srli a3, a3, 29 @@ -12127,7 +12127,7 @@ define <8 x double> @mgather_baseidx_v8f64(double* %base, <8 x i64> %idxs, <8 x i1> %m, <8 x double> %passthru) { ; RV32V-LABEL: mgather_baseidx_v8f64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vnsrl.wi v16, v8, 0 ; RV32V-NEXT: vsll.vi v8, v16, 3 ; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu @@ -12171,11 +12171,11 @@ ; RV32ZVE32F-NEXT: sw a4, 4(sp) ; RV32ZVE32F-NEXT: sw a3, 0(sp) ; RV32ZVE32F-NEXT: mv a2, sp -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vle32.v v8, (a2) ; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a1, v0 ; RV32ZVE32F-NEXT: andi a2, a1, 1 ; RV32ZVE32F-NEXT: bnez a2, .LBB96_10 @@ -12201,7 +12201,7 @@ ; RV32ZVE32F-NEXT: andi a1, a1, -128 ; RV32ZVE32F-NEXT: beqz a1, .LBB96_9 ; RV32ZVE32F-NEXT: .LBB96_8: # %cond.load19 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: fld fa7, 0(a1) @@ -12220,48 +12220,48 @@ ; RV32ZVE32F-NEXT: addi sp, sp, 64 ; RV32ZVE32F-NEXT: ret ; RV32ZVE32F-NEXT: .LBB96_10: # %cond.load -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a2, v8 ; RV32ZVE32F-NEXT: fld fa0, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 2 ; RV32ZVE32F-NEXT: beqz a2, .LBB96_2 ; RV32ZVE32F-NEXT: .LBB96_11: 
# %cond.load1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa1, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 4 ; RV32ZVE32F-NEXT: beqz a2, .LBB96_3 ; RV32ZVE32F-NEXT: .LBB96_12: # %cond.load4 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa2, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 8 ; RV32ZVE32F-NEXT: beqz a2, .LBB96_4 ; RV32ZVE32F-NEXT: .LBB96_13: # %cond.load7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa3, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 16 ; RV32ZVE32F-NEXT: beqz a2, .LBB96_5 ; RV32ZVE32F-NEXT: .LBB96_14: # %cond.load10 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa4, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 32 ; RV32ZVE32F-NEXT: beqz a2, .LBB96_6 ; RV32ZVE32F-NEXT: .LBB96_15: # %cond.load13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa5, 0(a2) ; RV32ZVE32F-NEXT: andi a2, a1, 64 ; RV32ZVE32F-NEXT: beqz a2, .LBB96_7 ; RV32ZVE32F-NEXT: .LBB96_16: # %cond.load16 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s a2, v10 ; RV32ZVE32F-NEXT: fld fa6, 0(a2) @@ -12271,7 +12271,7 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_v8f64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v0 ; RV64ZVE32F-NEXT: andi a4, a3, 1 ; RV64ZVE32F-NEXT: bnez a4, .LBB96_10 @@ -12371,7 +12371,7 @@ define <16 x i8> @mgather_baseidx_v16i8(i8* %base, <16 x i8> %idxs, <16 x i1> %m, <16 x i8> %passthru) { ; RV32-LABEL: mgather_baseidx_v16i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (a0), v12, v0.t @@ -12380,7 +12380,7 @@ ; ; RV64V-LABEL: mgather_baseidx_v16i8: ; RV64V: # %bb.0: -; RV64V-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64V-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64V-NEXT: vsext.vf8 v16, v8 ; RV64V-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV64V-NEXT: vluxei64.v v9, (a0), v16, v0.t @@ -12389,31 +12389,31 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_v16i8: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB97_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load -; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m1, tu, mu 
+; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m1, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v9, a2 ; RV64ZVE32F-NEXT: .LBB97_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB97_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 1 ; RV64ZVE32F-NEXT: .LBB97_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB97_6 @@ -12422,10 +12422,10 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e8, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e8, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 2 ; RV64ZVE32F-NEXT: .LBB97_6: # %else5 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB97_26 @@ -12436,18 +12436,18 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB97_10 ; RV64ZVE32F-NEXT: .LBB97_9: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v11, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 6, e8, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 6, e8, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 5 ; RV64ZVE32F-NEXT: .LBB97_10: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 8 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB97_28 @@ -12461,16 +12461,16 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 512 ; RV64ZVE32F-NEXT: beqz a2, .LBB97_15 ; RV64ZVE32F-NEXT: .LBB97_14: # %cond.load25 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 10, e8, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 10, e8, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 9 ; RV64ZVE32F-NEXT: .LBB97_15: # %else26 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 1024 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB97_17 @@ -12479,10 +12479,10 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v11, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 11, e8, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 11, e8, m1, tu, ma ; 
RV64ZVE32F-NEXT: vslideup.vi v9, v11, 10 ; RV64ZVE32F-NEXT: .LBB97_17: # %else29 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: slli a2, a1, 52 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bltz a2, .LBB97_31 @@ -12493,16 +12493,16 @@ ; RV64ZVE32F-NEXT: slli a2, a1, 50 ; RV64ZVE32F-NEXT: bgez a2, .LBB97_21 ; RV64ZVE32F-NEXT: .LBB97_20: # %cond.load37 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 14, e8, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 14, e8, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 13 ; RV64ZVE32F-NEXT: .LBB97_21: # %else38 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: slli a2, a1, 49 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bgez a2, .LBB97_23 @@ -12511,43 +12511,43 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 15, e8, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 15, e8, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 14 ; RV64ZVE32F-NEXT: .LBB97_23: # %else41 ; RV64ZVE32F-NEXT: lui a2, 1048568 ; RV64ZVE32F-NEXT: and a1, a1, a2 ; RV64ZVE32F-NEXT: beqz a1, .LBB97_25 ; RV64ZVE32F-NEXT: # %bb.24: # %cond.load43 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v8 ; RV64ZVE32F-NEXT: add a0, a0, a1 ; RV64ZVE32F-NEXT: lb a0, 0(a0) ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 15 ; RV64ZVE32F-NEXT: .LBB97_25: # %else44 ; RV64ZVE32F-NEXT: vmv1r.v v8, v9 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB97_26: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v11, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v11, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB97_8 ; RV64ZVE32F-NEXT: .LBB97_27: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v11, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 5, e8, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 5, e8, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 4 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB97_9 @@ -12557,52 +12557,52 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v11, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e8, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 7, e8, m1, tu, 
ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 6 ; RV64ZVE32F-NEXT: andi a2, a1, 128 ; RV64ZVE32F-NEXT: beqz a2, .LBB97_12 ; RV64ZVE32F-NEXT: .LBB97_29: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 7 ; RV64ZVE32F-NEXT: andi a2, a1, 256 ; RV64ZVE32F-NEXT: beqz a2, .LBB97_13 ; RV64ZVE32F-NEXT: .LBB97_30: # %cond.load22 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 9, e8, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 9, e8, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 8 ; RV64ZVE32F-NEXT: andi a2, a1, 512 ; RV64ZVE32F-NEXT: bnez a2, .LBB97_14 ; RV64ZVE32F-NEXT: j .LBB97_15 ; RV64ZVE32F-NEXT: .LBB97_31: # %cond.load31 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 12, e8, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 12, e8, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 11 ; RV64ZVE32F-NEXT: slli a2, a1, 51 ; RV64ZVE32F-NEXT: bgez a2, .LBB97_19 ; RV64ZVE32F-NEXT: .LBB97_32: # %cond.load34 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 13, e8, m1, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 13, e8, m1, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v9, v10, 12 ; RV64ZVE32F-NEXT: slli a2, a1, 50 ; RV64ZVE32F-NEXT: bltz a2, .LBB97_20 @@ -12618,7 +12618,7 @@ ; RV32-LABEL: mgather_baseidx_v32i8: ; RV32: # %bb.0: ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; RV32-NEXT: vsext.vf4 v16, v8 ; RV32-NEXT: vsetvli zero, zero, e8, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (a0), v16, v0.t @@ -12627,56 +12627,56 @@ ; ; RV64V-LABEL: mgather_baseidx_v32i8: ; RV64V: # %bb.0: -; RV64V-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64V-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64V-NEXT: vsext.vf8 v16, v8 ; RV64V-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV64V-NEXT: vmv1r.v v12, v10 ; RV64V-NEXT: vluxei64.v v12, (a0), v16, v0.t -; RV64V-NEXT: vsetivli zero, 16, e8, m2, ta, mu +; RV64V-NEXT: vsetivli zero, 16, e8, m2, ta, ma ; RV64V-NEXT: vslidedown.vi v10, v10, 16 ; RV64V-NEXT: vslidedown.vi v8, v8, 16 -; RV64V-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64V-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64V-NEXT: vsext.vf8 v16, v8 -; RV64V-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64V-NEXT: vsetivli zero, 
2, e8, mf4, ta, ma ; RV64V-NEXT: vslidedown.vi v0, v0, 2 ; RV64V-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; RV64V-NEXT: vluxei64.v v10, (a0), v16, v0.t ; RV64V-NEXT: li a0, 32 -; RV64V-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; RV64V-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; RV64V-NEXT: vslideup.vi v12, v10, 16 ; RV64V-NEXT: vmv2r.v v8, v12 ; RV64V-NEXT: ret ; ; RV64ZVE32F-LABEL: mgather_baseidx_v32i8: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB98_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load -; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v10, a2 ; RV64ZVE32F-NEXT: .LBB98_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB98_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v12 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 1 ; RV64ZVE32F-NEXT: .LBB98_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB98_6 @@ -12685,12 +12685,12 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 3, e8, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 3, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 2 ; RV64ZVE32F-NEXT: .LBB98_6: # %else5 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v13, v8, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB98_50 @@ -12701,20 +12701,20 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB98_10 ; RV64ZVE32F-NEXT: .LBB98_9: # %cond.load13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v13, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v12 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 6, e8, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 6, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 5 ; RV64ZVE32F-NEXT: .LBB98_10: # %else14 -; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, ta, mu +; RV64ZVE32F-NEXT: 
vsetivli zero, 8, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 8 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v13, v13, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB98_52 @@ -12728,18 +12728,18 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 512 ; RV64ZVE32F-NEXT: beqz a2, .LBB98_15 ; RV64ZVE32F-NEXT: .LBB98_14: # %cond.load25 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v13, v12, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v13 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 10, e8, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 10, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 9 ; RV64ZVE32F-NEXT: .LBB98_15: # %else26 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 1024 ; RV64ZVE32F-NEXT: vslidedown.vi v13, v12, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB98_17 @@ -12748,28 +12748,28 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 11, e8, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 11, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 10 ; RV64ZVE32F-NEXT: .LBB98_17: # %else29 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: slli a2, a1, 52 ; RV64ZVE32F-NEXT: vslidedown.vi v12, v12, 4 ; RV64ZVE32F-NEXT: bgez a2, .LBB98_19 ; RV64ZVE32F-NEXT: # %bb.18: # %cond.load31 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v13, v13, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v13 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 12, e8, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 12, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 11 ; RV64ZVE32F-NEXT: .LBB98_19: # %else32 -; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m2, ta, ma ; RV64ZVE32F-NEXT: slli a2, a1, 51 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 16 ; RV64ZVE32F-NEXT: bgez a2, .LBB98_21 @@ -12778,26 +12778,26 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 13, e8, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 13, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 12 ; RV64ZVE32F-NEXT: .LBB98_21: # %else35 ; RV64ZVE32F-NEXT: slli a2, a1, 50 ; RV64ZVE32F-NEXT: bgez a2, .LBB98_23 ; RV64ZVE32F-NEXT: # %bb.22: # %cond.load37 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: 
vslidedown.vi v13, v12, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v13 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 14, e8, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 14, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 13 ; RV64ZVE32F-NEXT: .LBB98_23: # %else38 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: slli a2, a1, 49 ; RV64ZVE32F-NEXT: vslidedown.vi v12, v12, 2 ; RV64ZVE32F-NEXT: bltz a2, .LBB98_55 @@ -12811,18 +12811,18 @@ ; RV64ZVE32F-NEXT: slli a2, a1, 46 ; RV64ZVE32F-NEXT: bgez a2, .LBB98_28 ; RV64ZVE32F-NEXT: .LBB98_27: # %cond.load49 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v12 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 18, e8, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 18, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 17 ; RV64ZVE32F-NEXT: .LBB98_28: # %else50 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: slli a2, a1, 45 ; RV64ZVE32F-NEXT: vslidedown.vi v13, v8, 2 ; RV64ZVE32F-NEXT: bgez a2, .LBB98_30 @@ -12831,12 +12831,12 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 19, e8, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 19, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 18 ; RV64ZVE32F-NEXT: .LBB98_30: # %else53 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: slli a2, a1, 44 ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4 ; RV64ZVE32F-NEXT: bltz a2, .LBB98_58 @@ -12847,20 +12847,20 @@ ; RV64ZVE32F-NEXT: slli a2, a1, 42 ; RV64ZVE32F-NEXT: bgez a2, .LBB98_34 ; RV64ZVE32F-NEXT: .LBB98_33: # %cond.load61 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v13, v12, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v13 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 22, e8, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 22, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 21 ; RV64ZVE32F-NEXT: .LBB98_34: # %else62 -; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 8 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: slli a2, a1, 41 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v12, 2 ; RV64ZVE32F-NEXT: bltz a2, .LBB98_60 @@ -12874,18 +12874,18 @@ ; 
RV64ZVE32F-NEXT: slli a2, a1, 38 ; RV64ZVE32F-NEXT: bgez a2, .LBB98_39 ; RV64ZVE32F-NEXT: .LBB98_38: # %cond.load73 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 26, e8, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 26, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 25 ; RV64ZVE32F-NEXT: .LBB98_39: # %else74 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: slli a2, a1, 37 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: bgez a2, .LBB98_41 @@ -12894,12 +12894,12 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 27, e8, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 27, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 26 ; RV64ZVE32F-NEXT: .LBB98_41: # %else77 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: slli a2, a1, 36 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bltz a2, .LBB98_63 @@ -12910,18 +12910,18 @@ ; RV64ZVE32F-NEXT: slli a2, a1, 34 ; RV64ZVE32F-NEXT: bgez a2, .LBB98_45 ; RV64ZVE32F-NEXT: .LBB98_44: # %cond.load85 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 30, e8, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 30, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 29 ; RV64ZVE32F-NEXT: .LBB98_45: # %else86 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: slli a2, a1, 33 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bgez a2, .LBB98_47 @@ -12930,50 +12930,50 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 31, e8, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 31, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 30 ; RV64ZVE32F-NEXT: .LBB98_47: # %else89 ; RV64ZVE32F-NEXT: lui a2, 524288 ; RV64ZVE32F-NEXT: and a1, a1, a2 ; RV64ZVE32F-NEXT: beqz a1, .LBB98_49 ; RV64ZVE32F-NEXT: # %bb.48: # %cond.load91 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v8 ; RV64ZVE32F-NEXT: add a0, a0, a1 ; RV64ZVE32F-NEXT: lb a0, 0(a0) ; RV64ZVE32F-NEXT: li a1, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; 
RV64ZVE32F-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, m2, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 31 ; RV64ZVE32F-NEXT: .LBB98_49: # %else92 ; RV64ZVE32F-NEXT: vmv2r.v v8, v10 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB98_50: # %cond.load7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v12, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v12 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 3 ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB98_8 ; RV64ZVE32F-NEXT: .LBB98_51: # %cond.load10 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v13 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 5, e8, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 5, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 4 ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: bnez a2, .LBB98_9 @@ -12983,34 +12983,34 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 7, e8, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 7, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 6 ; RV64ZVE32F-NEXT: andi a2, a1, 128 ; RV64ZVE32F-NEXT: beqz a2, .LBB98_12 ; RV64ZVE32F-NEXT: .LBB98_53: # %cond.load19 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v13, v13, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v13 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 7 ; RV64ZVE32F-NEXT: andi a2, a1, 256 ; RV64ZVE32F-NEXT: beqz a2, .LBB98_13 ; RV64ZVE32F-NEXT: .LBB98_54: # %cond.load22 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v12 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 9, e8, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 9, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 8 ; RV64ZVE32F-NEXT: andi a2, a1, 512 ; RV64ZVE32F-NEXT: bnez a2, .LBB98_14 @@ -13020,60 +13020,60 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; 
RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 15, e8, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 15, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 14 ; RV64ZVE32F-NEXT: slli a2, a1, 48 ; RV64ZVE32F-NEXT: bgez a2, .LBB98_25 ; RV64ZVE32F-NEXT: .LBB98_56: # %cond.load43 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v12, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v12 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 15 ; RV64ZVE32F-NEXT: slli a2, a1, 47 ; RV64ZVE32F-NEXT: bgez a2, .LBB98_26 ; RV64ZVE32F-NEXT: .LBB98_57: # %cond.load46 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 17, e8, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 17, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 16 ; RV64ZVE32F-NEXT: slli a2, a1, 46 ; RV64ZVE32F-NEXT: bltz a2, .LBB98_27 ; RV64ZVE32F-NEXT: j .LBB98_28 ; RV64ZVE32F-NEXT: .LBB98_58: # %cond.load55 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v13, v13, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v13 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 20, e8, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 20, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 19 ; RV64ZVE32F-NEXT: slli a2, a1, 43 ; RV64ZVE32F-NEXT: bgez a2, .LBB98_32 ; RV64ZVE32F-NEXT: .LBB98_59: # %cond.load58 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v12 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v14, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 21, e8, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 21, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 20 ; RV64ZVE32F-NEXT: slli a2, a1, 42 ; RV64ZVE32F-NEXT: bltz a2, .LBB98_33 @@ -13083,60 +13083,60 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 23, e8, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 23, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 22 
; RV64ZVE32F-NEXT: slli a2, a1, 40 ; RV64ZVE32F-NEXT: bgez a2, .LBB98_36 ; RV64ZVE32F-NEXT: .LBB98_61: # %cond.load67 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 24, e8, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 24, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 23 ; RV64ZVE32F-NEXT: slli a2, a1, 39 ; RV64ZVE32F-NEXT: bgez a2, .LBB98_37 ; RV64ZVE32F-NEXT: .LBB98_62: # %cond.load70 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 25, e8, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 25, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 24 ; RV64ZVE32F-NEXT: slli a2, a1, 38 ; RV64ZVE32F-NEXT: bltz a2, .LBB98_38 ; RV64ZVE32F-NEXT: j .LBB98_39 ; RV64ZVE32F-NEXT: .LBB98_63: # %cond.load79 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 28, e8, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 28, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 27 ; RV64ZVE32F-NEXT: slli a2, a1, 35 ; RV64ZVE32F-NEXT: bgez a2, .LBB98_43 ; RV64ZVE32F-NEXT: .LBB98_64: # %cond.load82 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: lb a2, 0(a2) ; RV64ZVE32F-NEXT: li a3, 32 -; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, a3, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vmv.s.x v12, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 29, e8, m2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 29, e8, m2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 28 ; RV64ZVE32F-NEXT: slli a2, a1, 34 ; RV64ZVE32F-NEXT: bltz a2, .LBB98_44 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll @@ -400,11 +400,11 @@ ; RV32-LABEL: masked_load_v32i64: ; RV32: # %bb.0: ; RV32-NEXT: addi a3, a1, 128 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vle64.v v16, (a3) ; RV32-NEXT: vle64.v v0, (a1) ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; RV32-NEXT: vmv.v.i v24, 0 ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vmseq.vv v8, v0, v24 diff --git 
a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll @@ -13,32 +13,32 @@ define void @mscatter_v1i8(<1 x i8> %val, <1 x i8*> %ptrs, <1 x i1> %m) { ; RV32V-LABEL: mscatter_v1i8: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; RV32V-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_v1i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret ; ; RV32ZVE32F-LABEL: mscatter_v1i8: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32ZVE32F-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_v1i8: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.v.i v9, 0 ; RV64ZVE32F-NEXT: vmerge.vim v9, v9, 1, v0 ; RV64ZVE32F-NEXT: vmv.x.s a1, v9 ; RV64ZVE32F-NEXT: andi a1, a1, 1 ; RV64ZVE32F-NEXT: beqz a1, .LBB0_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.store -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vse8.v v8, (a0) ; RV64ZVE32F-NEXT: .LBB0_2: # %else ; RV64ZVE32F-NEXT: ret @@ -51,25 +51,25 @@ define void @mscatter_v2i8(<2 x i8> %val, <2 x i8*> %ptrs, <2 x i1> %m) { ; RV32V-LABEL: mscatter_v2i8: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; RV32V-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_v2i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret ; ; RV32ZVE32F-LABEL: mscatter_v2i8: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32ZVE32F-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_v2i8: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: bnez a3, .LBB1_3 @@ -79,12 +79,12 @@ ; RV64ZVE32F-NEXT: .LBB1_2: # %else2 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB1_3: # %cond.store -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vse8.v v8, (a0) ; RV64ZVE32F-NEXT: andi a0, a2, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB1_2 ; RV64ZVE32F-NEXT: .LBB1_4: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vse8.v v8, (a1) ; RV64ZVE32F-NEXT: ret @@ -95,28 +95,28 @@ define void @mscatter_v2i16_truncstore_v2i8(<2 x i16> %val, <2 x i8*> %ptrs, <2 x i1> %m) { ; RV32V-LABEL: mscatter_v2i16_truncstore_v2i8: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; RV32V-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; RV32V-NEXT: vnsrl.wi v8, v8, 0 ; RV32V-NEXT: vsoxei32.v v8, 
(zero), v9, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_v2i16_truncstore_v2i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret ; ; RV32ZVE32F-LABEL: mscatter_v2i16_truncstore_v2i8: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vnsrl.wi v8, v8, 0 ; RV32ZVE32F-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32ZVE32F-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_v2i16_truncstore_v2i8: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: vnsrl.wi v8, v8, 0 @@ -127,12 +127,12 @@ ; RV64ZVE32F-NEXT: .LBB2_2: # %else2 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB2_3: # %cond.store -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vse8.v v8, (a0) ; RV64ZVE32F-NEXT: andi a0, a2, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB2_2 ; RV64ZVE32F-NEXT: .LBB2_4: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vse8.v v8, (a1) ; RV64ZVE32F-NEXT: ret @@ -144,36 +144,36 @@ define void @mscatter_v2i32_truncstore_v2i8(<2 x i32> %val, <2 x i8*> %ptrs, <2 x i1> %m) { ; RV32V-LABEL: mscatter_v2i32_truncstore_v2i8: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; RV32V-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; RV32V-NEXT: vnsrl.wi v8, v8, 0 -; RV32V-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; RV32V-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; RV32V-NEXT: vnsrl.wi v8, v8, 0 ; RV32V-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_v2i32_truncstore_v2i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; RV64-NEXT: vnsrl.wi v8, v8, 0 -; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret ; ; RV32ZVE32F-LABEL: mscatter_v2i32_truncstore_v2i8: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV32ZVE32F-NEXT: vnsrl.wi v8, v8, 0 -; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vnsrl.wi v8, v8, 0 ; RV32ZVE32F-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32ZVE32F-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_v2i32_truncstore_v2i8: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vnsrl.wi v8, v8, 0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: vnsrl.wi v8, v8, 0 @@ -184,12 +184,12 @@ ; RV64ZVE32F-NEXT: .LBB3_2: # %else2 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB3_3: # %cond.store -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vse8.v v8, (a0) ; RV64ZVE32F-NEXT: andi a0, 
a2, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB3_2 ; RV64ZVE32F-NEXT: .LBB3_4: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vse8.v v8, (a1) ; RV64ZVE32F-NEXT: ret @@ -201,22 +201,22 @@ define void @mscatter_v2i64_truncstore_v2i8(<2 x i64> %val, <2 x i8*> %ptrs, <2 x i1> %m) { ; RV32V-LABEL: mscatter_v2i64_truncstore_v2i8: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32V-NEXT: vnsrl.wi v8, v8, 0 -; RV32V-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; RV32V-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; RV32V-NEXT: vnsrl.wi v8, v8, 0 -; RV32V-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; RV32V-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; RV32V-NEXT: vnsrl.wi v8, v8, 0 ; RV32V-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_v2i64_truncstore_v2i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV64-NEXT: vnsrl.wi v8, v8, 0 -; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; RV64-NEXT: vnsrl.wi v8, v8, 0 -; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret @@ -230,11 +230,11 @@ ; RV32ZVE32F-NEXT: sb a1, 15(sp) ; RV32ZVE32F-NEXT: sb a0, 14(sp) ; RV32ZVE32F-NEXT: addi a0, sp, 15 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vle8.v v9, (a0) ; RV32ZVE32F-NEXT: addi a0, sp, 14 ; RV32ZVE32F-NEXT: vle8.v v10, (a0) -; RV32ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, mu +; RV32ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, ma ; RV32ZVE32F-NEXT: vslideup.vi v10, v9, 1 ; RV32ZVE32F-NEXT: vsoxei32.v v10, (zero), v8, v0.t ; RV32ZVE32F-NEXT: addi sp, sp, 16 @@ -247,11 +247,11 @@ ; RV64ZVE32F-NEXT: sb a1, 15(sp) ; RV64ZVE32F-NEXT: sb a0, 14(sp) ; RV64ZVE32F-NEXT: addi a0, sp, 15 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vle8.v v9, (a0) ; RV64ZVE32F-NEXT: addi a0, sp, 14 ; RV64ZVE32F-NEXT: vle8.v v8, (a0) -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, tu, ma ; RV64ZVE32F-NEXT: vmv.x.s a0, v0 ; RV64ZVE32F-NEXT: andi a1, a0, 1 ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 @@ -263,12 +263,12 @@ ; RV64ZVE32F-NEXT: addi sp, sp, 16 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB4_3: # %cond.store -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vse8.v v8, (a2) ; RV64ZVE32F-NEXT: andi a0, a0, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB4_2 ; RV64ZVE32F-NEXT: .LBB4_4: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vse8.v v8, (a3) ; RV64ZVE32F-NEXT: addi sp, sp, 16 @@ -283,13 +283,13 @@ define void @mscatter_v4i8(<4 x i8> %val, <4 x i8*> %ptrs, <4 x i1> %m) { ; RV32-LABEL: mscatter_v4i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v4i8: ; RV64: # %bb.0: -; RV64-NEXT: 
vsetivli zero, 4, e8, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret ; @@ -298,7 +298,7 @@ ; RV64ZVE32F-NEXT: ld a1, 24(a0) ; RV64ZVE32F-NEXT: ld a2, 16(a0) ; RV64ZVE32F-NEXT: ld a4, 8(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v0 ; RV64ZVE32F-NEXT: andi a5, a3, 1 ; RV64ZVE32F-NEXT: bnez a5, .LBB5_5 @@ -315,24 +315,24 @@ ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB5_5: # %cond.store ; RV64ZVE32F-NEXT: ld a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vse8.v v8, (a0) ; RV64ZVE32F-NEXT: andi a0, a3, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB5_2 ; RV64ZVE32F-NEXT: .LBB5_6: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vse8.v v9, (a4) ; RV64ZVE32F-NEXT: andi a0, a3, 4 ; RV64ZVE32F-NEXT: beqz a0, .LBB5_3 ; RV64ZVE32F-NEXT: .LBB5_7: # %cond.store3 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: vse8.v v9, (a2) ; RV64ZVE32F-NEXT: andi a0, a3, 8 ; RV64ZVE32F-NEXT: beqz a0, .LBB5_4 ; RV64ZVE32F-NEXT: .LBB5_8: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 3 ; RV64ZVE32F-NEXT: vse8.v v8, (a1) ; RV64ZVE32F-NEXT: ret @@ -343,13 +343,13 @@ define void @mscatter_truemask_v4i8(<4 x i8> %val, <4 x i8*> %ptrs) { ; RV32-LABEL: mscatter_truemask_v4i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_truemask_v4i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10 ; RV64-NEXT: ret ; @@ -358,7 +358,7 @@ ; RV64ZVE32F-NEXT: ld a1, 24(a0) ; RV64ZVE32F-NEXT: ld a2, 16(a0) ; RV64ZVE32F-NEXT: ld a4, 8(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vmset.m v9 ; RV64ZVE32F-NEXT: vmv.x.s a3, v9 ; RV64ZVE32F-NEXT: beqz zero, .LBB6_5 @@ -375,24 +375,24 @@ ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB6_5: # %cond.store ; RV64ZVE32F-NEXT: ld a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vse8.v v8, (a0) ; RV64ZVE32F-NEXT: andi a0, a3, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB6_2 ; RV64ZVE32F-NEXT: .LBB6_6: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vse8.v v9, (a4) ; RV64ZVE32F-NEXT: andi a0, a3, 4 ; RV64ZVE32F-NEXT: beqz a0, .LBB6_3 ; RV64ZVE32F-NEXT: .LBB6_7: # %cond.store3 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: vse8.v v9, (a2) ; RV64ZVE32F-NEXT: andi a0, a3, 8 ; RV64ZVE32F-NEXT: beqz a0, .LBB6_4 ; RV64ZVE32F-NEXT: .LBB6_8: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli 
zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 3 ; RV64ZVE32F-NEXT: vse8.v v8, (a1) ; RV64ZVE32F-NEXT: ret @@ -415,13 +415,13 @@ define void @mscatter_v8i8(<8 x i8> %val, <8 x i8*> %ptrs, <8 x i1> %m) { ; RV32-LABEL: mscatter_v8i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v8i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t ; RV64-NEXT: ret ; @@ -434,7 +434,7 @@ ; RV64ZVE32F-NEXT: ld a6, 24(a0) ; RV64ZVE32F-NEXT: ld a7, 16(a0) ; RV64ZVE32F-NEXT: ld t0, 8(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v0 ; RV64ZVE32F-NEXT: andi t1, a3, 1 ; RV64ZVE32F-NEXT: bnez t1, .LBB8_9 @@ -463,48 +463,48 @@ ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB8_9: # %cond.store ; RV64ZVE32F-NEXT: ld a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vse8.v v8, (a0) ; RV64ZVE32F-NEXT: andi a0, a3, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB8_2 ; RV64ZVE32F-NEXT: .LBB8_10: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vse8.v v9, (t0) ; RV64ZVE32F-NEXT: andi a0, a3, 4 ; RV64ZVE32F-NEXT: beqz a0, .LBB8_3 ; RV64ZVE32F-NEXT: .LBB8_11: # %cond.store3 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: vse8.v v9, (a7) ; RV64ZVE32F-NEXT: andi a0, a3, 8 ; RV64ZVE32F-NEXT: beqz a0, .LBB8_4 ; RV64ZVE32F-NEXT: .LBB8_12: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 3 ; RV64ZVE32F-NEXT: vse8.v v9, (a6) ; RV64ZVE32F-NEXT: andi a0, a3, 16 ; RV64ZVE32F-NEXT: beqz a0, .LBB8_5 ; RV64ZVE32F-NEXT: .LBB8_13: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4 ; RV64ZVE32F-NEXT: vse8.v v9, (a5) ; RV64ZVE32F-NEXT: andi a0, a3, 32 ; RV64ZVE32F-NEXT: beqz a0, .LBB8_6 ; RV64ZVE32F-NEXT: .LBB8_14: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 5 ; RV64ZVE32F-NEXT: vse8.v v9, (a4) ; RV64ZVE32F-NEXT: andi a0, a3, 64 ; RV64ZVE32F-NEXT: beqz a0, .LBB8_7 ; RV64ZVE32F-NEXT: .LBB8_15: # %cond.store11 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 6 ; RV64ZVE32F-NEXT: vse8.v v9, (a2) ; RV64ZVE32F-NEXT: andi a0, a3, -128 ; RV64ZVE32F-NEXT: beqz a0, .LBB8_8 ; RV64ZVE32F-NEXT: .LBB8_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV64ZVE32F-NEXT: vse8.v v8, (a1) ; RV64ZVE32F-NEXT: ret @@ -515,55 +515,55 @@ define void @mscatter_baseidx_v8i8(<8 x i8> %val, i8* %base, <8 x i8> %idxs, <8 x i1> %m) { ; RV32-LABEL: mscatter_baseidx_v8i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli 
zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v10, v9 -; RV32-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; RV32-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_v8i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v12, v9 -; RV64-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_baseidx_v8i8: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB9_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.store ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vse8.v v8, (a2) ; RV64ZVE32F-NEXT: .LBB9_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB9_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vse8.v v10, (a2) ; RV64ZVE32F-NEXT: .LBB9_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB9_6 ; RV64ZVE32F-NEXT: # %bb.5: # %cond.store3 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 2 ; RV64ZVE32F-NEXT: vse8.v v11, (a2) ; RV64ZVE32F-NEXT: .LBB9_6: # %else4 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB9_13 @@ -574,15 +574,15 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB9_10 ; RV64ZVE32F-NEXT: .LBB9_9: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV64ZVE32F-NEXT: vse8.v v10, (a2) ; RV64ZVE32F-NEXT: .LBB9_10: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB9_15 @@ -592,20 +592,20 @@ ; RV64ZVE32F-NEXT: .LBB9_12: # %else14 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB9_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, 
ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV64ZVE32F-NEXT: vse8.v v10, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB9_8 ; RV64ZVE32F-NEXT: .LBB9_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV64ZVE32F-NEXT: vse8.v v10, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 32 @@ -614,17 +614,17 @@ ; RV64ZVE32F-NEXT: .LBB9_15: # %cond.store11 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV64ZVE32F-NEXT: vse8.v v10, (a2) ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB9_12 ; RV64ZVE32F-NEXT: .LBB9_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v9 ; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV64ZVE32F-NEXT: vse8.v v8, (a0) ; RV64ZVE32F-NEXT: ret @@ -638,32 +638,32 @@ define void @mscatter_v1i16(<1 x i16> %val, <1 x i16*> %ptrs, <1 x i1> %m) { ; RV32V-LABEL: mscatter_v1i16: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; RV32V-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_v1i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret ; ; RV32ZVE32F-LABEL: mscatter_v1i16: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV32ZVE32F-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32ZVE32F-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_v1i16: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.v.i v9, 0 ; RV64ZVE32F-NEXT: vmerge.vim v9, v9, 1, v0 ; RV64ZVE32F-NEXT: vmv.x.s a1, v9 ; RV64ZVE32F-NEXT: andi a1, a1, 1 ; RV64ZVE32F-NEXT: beqz a1, .LBB10_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.store -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vse16.v v8, (a0) ; RV64ZVE32F-NEXT: .LBB10_2: # %else ; RV64ZVE32F-NEXT: ret @@ -676,25 +676,25 @@ define void @mscatter_v2i16(<2 x i16> %val, <2 x i16*> %ptrs, <2 x i1> %m) { ; RV32V-LABEL: mscatter_v2i16: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; RV32V-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_v2i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), 
v9, v0.t ; RV64-NEXT: ret ; ; RV32ZVE32F-LABEL: mscatter_v2i16: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV32ZVE32F-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32ZVE32F-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_v2i16: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: bnez a3, .LBB11_3 @@ -704,12 +704,12 @@ ; RV64ZVE32F-NEXT: .LBB11_2: # %else2 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB11_3: # %cond.store -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vse16.v v8, (a0) ; RV64ZVE32F-NEXT: andi a0, a2, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB11_2 ; RV64ZVE32F-NEXT: .LBB11_4: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vse16.v v8, (a1) ; RV64ZVE32F-NEXT: ret @@ -720,30 +720,30 @@ define void @mscatter_v2i32_truncstore_v2i16(<2 x i32> %val, <2 x i16*> %ptrs, <2 x i1> %m) { ; RV32V-LABEL: mscatter_v2i32_truncstore_v2i16: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; RV32V-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; RV32V-NEXT: vnsrl.wi v8, v8, 0 ; RV32V-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_v2i32_truncstore_v2i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret ; ; RV32ZVE32F-LABEL: mscatter_v2i32_truncstore_v2i16: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV32ZVE32F-NEXT: vnsrl.wi v8, v8, 0 ; RV32ZVE32F-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32ZVE32F-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_v2i32_truncstore_v2i16: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vnsrl.wi v8, v8, 0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: bnez a3, .LBB12_3 @@ -753,12 +753,12 @@ ; RV64ZVE32F-NEXT: .LBB12_2: # %else2 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB12_3: # %cond.store -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vse16.v v8, (a0) ; RV64ZVE32F-NEXT: andi a0, a2, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB12_2 ; RV64ZVE32F-NEXT: .LBB12_4: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vse16.v v8, (a1) ; RV64ZVE32F-NEXT: ret @@ -770,18 +770,18 @@ define void @mscatter_v2i64_truncstore_v2i16(<2 x i64> %val, <2 x i16*> %ptrs, <2 x i1> %m) { ; RV32V-LABEL: mscatter_v2i64_truncstore_v2i16: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32V-NEXT: vnsrl.wi v8, v8, 0 -; RV32V-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; RV32V-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; 
RV32V-NEXT: vnsrl.wi v8, v8, 0 ; RV32V-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_v2i64_truncstore_v2i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV64-NEXT: vnsrl.wi v8, v8, 0 -; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret @@ -795,11 +795,11 @@ ; RV32ZVE32F-NEXT: sh a1, 14(sp) ; RV32ZVE32F-NEXT: sh a0, 12(sp) ; RV32ZVE32F-NEXT: addi a0, sp, 14 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV32ZVE32F-NEXT: vle16.v v9, (a0) ; RV32ZVE32F-NEXT: addi a0, sp, 12 ; RV32ZVE32F-NEXT: vle16.v v10, (a0) -; RV32ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, tu, mu +; RV32ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, tu, ma ; RV32ZVE32F-NEXT: vslideup.vi v10, v9, 1 ; RV32ZVE32F-NEXT: vsoxei32.v v10, (zero), v8, v0.t ; RV32ZVE32F-NEXT: addi sp, sp, 16 @@ -812,13 +812,13 @@ ; RV64ZVE32F-NEXT: sh a1, 14(sp) ; RV64ZVE32F-NEXT: sh a0, 12(sp) ; RV64ZVE32F-NEXT: addi a0, sp, 14 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vle16.v v9, (a0) ; RV64ZVE32F-NEXT: addi a0, sp, 12 ; RV64ZVE32F-NEXT: vle16.v v8, (a0) -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, tu, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, tu, ma ; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a0, v0 ; RV64ZVE32F-NEXT: andi a1, a0, 1 ; RV64ZVE32F-NEXT: bnez a1, .LBB13_3 @@ -829,12 +829,12 @@ ; RV64ZVE32F-NEXT: addi sp, sp, 16 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB13_3: # %cond.store -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vse16.v v8, (a2) ; RV64ZVE32F-NEXT: andi a0, a0, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB13_2 ; RV64ZVE32F-NEXT: .LBB13_4: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vse16.v v8, (a3) ; RV64ZVE32F-NEXT: addi sp, sp, 16 @@ -849,13 +849,13 @@ define void @mscatter_v4i16(<4 x i16> %val, <4 x i16*> %ptrs, <4 x i1> %m) { ; RV32-LABEL: mscatter_v4i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v4i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret ; @@ -864,7 +864,7 @@ ; RV64ZVE32F-NEXT: ld a1, 24(a0) ; RV64ZVE32F-NEXT: ld a2, 16(a0) ; RV64ZVE32F-NEXT: ld a4, 8(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v0 ; RV64ZVE32F-NEXT: andi a5, a3, 1 ; RV64ZVE32F-NEXT: bnez a5, .LBB14_5 @@ -881,24 +881,24 @@ ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB14_5: # %cond.store ; RV64ZVE32F-NEXT: ld a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vse16.v v8, (a0) ; RV64ZVE32F-NEXT: andi a0, a3, 2 ; RV64ZVE32F-NEXT: 
beqz a0, .LBB14_2 ; RV64ZVE32F-NEXT: .LBB14_6: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vse16.v v9, (a4) ; RV64ZVE32F-NEXT: andi a0, a3, 4 ; RV64ZVE32F-NEXT: beqz a0, .LBB14_3 ; RV64ZVE32F-NEXT: .LBB14_7: # %cond.store3 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: vse16.v v9, (a2) ; RV64ZVE32F-NEXT: andi a0, a3, 8 ; RV64ZVE32F-NEXT: beqz a0, .LBB14_4 ; RV64ZVE32F-NEXT: .LBB14_8: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 3 ; RV64ZVE32F-NEXT: vse16.v v8, (a1) ; RV64ZVE32F-NEXT: ret @@ -909,13 +909,13 @@ define void @mscatter_truemask_v4i16(<4 x i16> %val, <4 x i16*> %ptrs) { ; RV32-LABEL: mscatter_truemask_v4i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_truemask_v4i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10 ; RV64-NEXT: ret ; @@ -924,7 +924,7 @@ ; RV64ZVE32F-NEXT: ld a1, 24(a0) ; RV64ZVE32F-NEXT: ld a2, 16(a0) ; RV64ZVE32F-NEXT: ld a4, 8(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vmset.m v9 ; RV64ZVE32F-NEXT: vmv.x.s a3, v9 ; RV64ZVE32F-NEXT: beqz zero, .LBB15_5 @@ -941,24 +941,24 @@ ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB15_5: # %cond.store ; RV64ZVE32F-NEXT: ld a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vse16.v v8, (a0) ; RV64ZVE32F-NEXT: andi a0, a3, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB15_2 ; RV64ZVE32F-NEXT: .LBB15_6: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vse16.v v9, (a4) ; RV64ZVE32F-NEXT: andi a0, a3, 4 ; RV64ZVE32F-NEXT: beqz a0, .LBB15_3 ; RV64ZVE32F-NEXT: .LBB15_7: # %cond.store3 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: vse16.v v9, (a2) ; RV64ZVE32F-NEXT: andi a0, a3, 8 ; RV64ZVE32F-NEXT: beqz a0, .LBB15_4 ; RV64ZVE32F-NEXT: .LBB15_8: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 3 ; RV64ZVE32F-NEXT: vse16.v v8, (a1) ; RV64ZVE32F-NEXT: ret @@ -981,13 +981,13 @@ define void @mscatter_v8i16(<8 x i16> %val, <8 x i16*> %ptrs, <8 x i1> %m) { ; RV32-LABEL: mscatter_v8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t ; RV64-NEXT: ret ; @@ -1000,7 +1000,7 @@ ; RV64ZVE32F-NEXT: ld a6, 24(a0) ; RV64ZVE32F-NEXT: ld a7, 16(a0) ; RV64ZVE32F-NEXT: 
ld t0, 8(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v0 ; RV64ZVE32F-NEXT: andi t1, a3, 1 ; RV64ZVE32F-NEXT: bnez t1, .LBB17_9 @@ -1029,48 +1029,48 @@ ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB17_9: # %cond.store ; RV64ZVE32F-NEXT: ld a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vse16.v v8, (a0) ; RV64ZVE32F-NEXT: andi a0, a3, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB17_2 ; RV64ZVE32F-NEXT: .LBB17_10: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vse16.v v9, (t0) ; RV64ZVE32F-NEXT: andi a0, a3, 4 ; RV64ZVE32F-NEXT: beqz a0, .LBB17_3 ; RV64ZVE32F-NEXT: .LBB17_11: # %cond.store3 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: vse16.v v9, (a7) ; RV64ZVE32F-NEXT: andi a0, a3, 8 ; RV64ZVE32F-NEXT: beqz a0, .LBB17_4 ; RV64ZVE32F-NEXT: .LBB17_12: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 3 ; RV64ZVE32F-NEXT: vse16.v v9, (a6) ; RV64ZVE32F-NEXT: andi a0, a3, 16 ; RV64ZVE32F-NEXT: beqz a0, .LBB17_5 ; RV64ZVE32F-NEXT: .LBB17_13: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4 ; RV64ZVE32F-NEXT: vse16.v v9, (a5) ; RV64ZVE32F-NEXT: andi a0, a3, 32 ; RV64ZVE32F-NEXT: beqz a0, .LBB17_6 ; RV64ZVE32F-NEXT: .LBB17_14: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 5 ; RV64ZVE32F-NEXT: vse16.v v9, (a4) ; RV64ZVE32F-NEXT: andi a0, a3, 64 ; RV64ZVE32F-NEXT: beqz a0, .LBB17_7 ; RV64ZVE32F-NEXT: .LBB17_15: # %cond.store11 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 6 ; RV64ZVE32F-NEXT: vse16.v v9, (a2) ; RV64ZVE32F-NEXT: andi a0, a3, -128 ; RV64ZVE32F-NEXT: beqz a0, .LBB17_8 ; RV64ZVE32F-NEXT: .LBB17_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV64ZVE32F-NEXT: vse16.v v8, (a1) ; RV64ZVE32F-NEXT: ret @@ -1081,25 +1081,25 @@ define void @mscatter_baseidx_v8i8_v8i16(<8 x i16> %val, i16* %base, <8 x i8> %idxs, <8 x i1> %m) { ; RV32-LABEL: mscatter_baseidx_v8i8_v8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v10, v9 ; RV32-NEXT: vadd.vv v10, v10, v10 -; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_v8i8_v8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v12, v9 ; RV64-NEXT: vadd.vv v12, v12, v12 -; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret ; ; 
RV64ZVE32F-LABEL: mscatter_baseidx_v8i8_v8i16: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB18_2 @@ -1107,22 +1107,22 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vse16.v v8, (a2) ; RV64ZVE32F-NEXT: .LBB18_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB18_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; RV64ZVE32F-NEXT: .LBB18_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB18_6 @@ -1130,11 +1130,11 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 2 ; RV64ZVE32F-NEXT: vse16.v v11, (a2) ; RV64ZVE32F-NEXT: .LBB18_6: # %else4 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB18_13 @@ -1145,16 +1145,16 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB18_10 ; RV64ZVE32F-NEXT: .LBB18_9: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; RV64ZVE32F-NEXT: .LBB18_10: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB18_15 @@ -1164,22 +1164,22 @@ ; RV64ZVE32F-NEXT: .LBB18_12: # %else14 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB18_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB18_8 ; RV64ZVE32F-NEXT: .LBB18_14: # %cond.store7 -; RV64ZVE32F-NEXT: 
vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 32 @@ -1189,18 +1189,18 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB18_12 ; RV64ZVE32F-NEXT: .LBB18_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v9 ; RV64ZVE32F-NEXT: slli a1, a1, 1 ; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV64ZVE32F-NEXT: vse16.v v8, (a0) ; RV64ZVE32F-NEXT: ret @@ -1212,25 +1212,25 @@ define void @mscatter_baseidx_sext_v8i8_v8i16(<8 x i16> %val, i16* %base, <8 x i8> %idxs, <8 x i1> %m) { ; RV32-LABEL: mscatter_baseidx_sext_v8i8_v8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v10, v9 ; RV32-NEXT: vadd.vv v10, v10, v10 -; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_v8i8_v8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v12, v9 ; RV64-NEXT: vadd.vv v12, v12, v12 -; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_baseidx_sext_v8i8_v8i16: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB19_2 @@ -1238,22 +1238,22 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vse16.v v8, (a2) ; RV64ZVE32F-NEXT: .LBB19_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB19_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; RV64ZVE32F-NEXT: .LBB19_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 
4 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB19_6 @@ -1261,11 +1261,11 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 2 ; RV64ZVE32F-NEXT: vse16.v v11, (a2) ; RV64ZVE32F-NEXT: .LBB19_6: # %else4 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB19_13 @@ -1276,16 +1276,16 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB19_10 ; RV64ZVE32F-NEXT: .LBB19_9: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; RV64ZVE32F-NEXT: .LBB19_10: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB19_15 @@ -1295,22 +1295,22 @@ ; RV64ZVE32F-NEXT: .LBB19_12: # %else14 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB19_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB19_8 ; RV64ZVE32F-NEXT: .LBB19_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 32 @@ -1320,18 +1320,18 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB19_12 ; RV64ZVE32F-NEXT: .LBB19_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v9 ; RV64ZVE32F-NEXT: slli a1, a1, 1 ; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV64ZVE32F-NEXT: vse16.v v8, (a0) ; RV64ZVE32F-NEXT: ret @@ -1344,25 
+1344,25 @@ define void @mscatter_baseidx_zext_v8i8_v8i16(<8 x i16> %val, i16* %base, <8 x i8> %idxs, <8 x i1> %m) { ; RV32-LABEL: mscatter_baseidx_zext_v8i8_v8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vzext.vf4 v10, v9 ; RV32-NEXT: vadd.vv v10, v10, v10 -; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_zext_v8i8_v8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vzext.vf8 v12, v9 ; RV64-NEXT: vadd.vv v12, v12, v12 -; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_baseidx_zext_v8i8_v8i16: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB20_2 @@ -1371,23 +1371,23 @@ ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vse16.v v8, (a2) ; RV64ZVE32F-NEXT: .LBB20_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB20_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; RV64ZVE32F-NEXT: .LBB20_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB20_6 @@ -1396,11 +1396,11 @@ ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 2 ; RV64ZVE32F-NEXT: vse16.v v11, (a2) ; RV64ZVE32F-NEXT: .LBB20_6: # %else4 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB20_13 @@ -1411,17 +1411,17 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB20_10 ; RV64ZVE32F-NEXT: .LBB20_9: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; 
RV64ZVE32F-NEXT: .LBB20_10: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB20_15 @@ -1431,24 +1431,24 @@ ; RV64ZVE32F-NEXT: .LBB20_12: # %else14 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB20_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB20_8 ; RV64ZVE32F-NEXT: .LBB20_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 32 @@ -1459,19 +1459,19 @@ ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB20_12 ; RV64ZVE32F-NEXT: .LBB20_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v9 ; RV64ZVE32F-NEXT: andi a1, a1, 255 ; RV64ZVE32F-NEXT: slli a1, a1, 1 ; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV64ZVE32F-NEXT: vse16.v v8, (a0) ; RV64ZVE32F-NEXT: ret @@ -1484,49 +1484,49 @@ define void @mscatter_baseidx_v8i16(<8 x i16> %val, i16* %base, <8 x i16> %idxs, <8 x i1> %m) { ; RV32-LABEL: mscatter_baseidx_v8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf2 v10, v9 ; RV32-NEXT: vadd.vv v10, v10, v10 -; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_v8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf4 v12, v9 ; RV64-NEXT: vadd.vv v12, v12, v12 -; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_baseidx_v8i16: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, 
.LBB21_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.store -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vse16.v v8, (a2) ; RV64ZVE32F-NEXT: .LBB21_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB21_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; RV64ZVE32F-NEXT: .LBB21_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB21_6 @@ -1534,11 +1534,11 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 2 ; RV64ZVE32F-NEXT: vse16.v v11, (a2) ; RV64ZVE32F-NEXT: .LBB21_6: # %else4 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB21_13 @@ -1549,16 +1549,16 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB21_10 ; RV64ZVE32F-NEXT: .LBB21_9: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; RV64ZVE32F-NEXT: .LBB21_10: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB21_15 @@ -1568,22 +1568,22 @@ ; RV64ZVE32F-NEXT: .LBB21_12: # %else14 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB21_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB21_8 ; RV64ZVE32F-NEXT: .LBB21_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; 
RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 32 @@ -1593,18 +1593,18 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB21_12 ; RV64ZVE32F-NEXT: .LBB21_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v9 ; RV64ZVE32F-NEXT: slli a1, a1, 1 ; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV64ZVE32F-NEXT: vse16.v v8, (a0) ; RV64ZVE32F-NEXT: ret @@ -1618,32 +1618,32 @@ define void @mscatter_v1i32(<1 x i32> %val, <1 x i32*> %ptrs, <1 x i1> %m) { ; RV32V-LABEL: mscatter_v1i32: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV32V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_v1i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret ; ; RV32ZVE32F-LABEL: mscatter_v1i32: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32ZVE32F-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_v1i32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.v.i v9, 0 ; RV64ZVE32F-NEXT: vmerge.vim v9, v9, 1, v0 ; RV64ZVE32F-NEXT: vmv.x.s a1, v9 ; RV64ZVE32F-NEXT: andi a1, a1, 1 ; RV64ZVE32F-NEXT: beqz a1, .LBB22_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.store -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vse32.v v8, (a0) ; RV64ZVE32F-NEXT: .LBB22_2: # %else ; RV64ZVE32F-NEXT: ret @@ -1656,25 +1656,25 @@ define void @mscatter_v2i32(<2 x i32> %val, <2 x i32*> %ptrs, <2 x i1> %m) { ; RV32V-LABEL: mscatter_v2i32: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_v2i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret ; ; RV32ZVE32F-LABEL: mscatter_v2i32: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32ZVE32F-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_v2i32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 
1 ; RV64ZVE32F-NEXT: bnez a3, .LBB23_3 @@ -1684,12 +1684,12 @@ ; RV64ZVE32F-NEXT: .LBB23_2: # %else2 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB23_3: # %cond.store -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vse32.v v8, (a0) ; RV64ZVE32F-NEXT: andi a0, a2, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB23_2 ; RV64ZVE32F-NEXT: .LBB23_4: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vse32.v v8, (a1) ; RV64ZVE32F-NEXT: ret @@ -1700,14 +1700,14 @@ define void @mscatter_v2i64_truncstore_v2i32(<2 x i64> %val, <2 x i32*> %ptrs, <2 x i1> %m) { ; RV32V-LABEL: mscatter_v2i64_truncstore_v2i32: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32V-NEXT: vnsrl.wi v8, v8, 0 ; RV32V-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_v2i64_truncstore_v2i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret @@ -1716,20 +1716,20 @@ ; RV32ZVE32F: # %bb.0: ; RV32ZVE32F-NEXT: lw a1, 0(a0) ; RV32ZVE32F-NEXT: addi a0, a0, 8 -; RV32ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vlse32.v v9, (a0), zero -; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu +; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, ma ; RV32ZVE32F-NEXT: vmv.s.x v9, a1 ; RV32ZVE32F-NEXT: vsoxei32.v v9, (zero), v8, v0.t ; RV32ZVE32F-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_v2i64_truncstore_v2i32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.v.x v8, a1 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, tu, ma ; RV64ZVE32F-NEXT: vmv.s.x v8, a0 -; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a0, v0 ; RV64ZVE32F-NEXT: andi a1, a0, 1 ; RV64ZVE32F-NEXT: bnez a1, .LBB24_3 @@ -1739,12 +1739,12 @@ ; RV64ZVE32F-NEXT: .LBB24_2: # %else2 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB24_3: # %cond.store -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vse32.v v8, (a2) ; RV64ZVE32F-NEXT: andi a0, a0, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB24_2 ; RV64ZVE32F-NEXT: .LBB24_4: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vse32.v v8, (a3) ; RV64ZVE32F-NEXT: ret @@ -1758,13 +1758,13 @@ define void @mscatter_v4i32(<4 x i32> %val, <4 x i32*> %ptrs, <4 x i1> %m) { ; RV32-LABEL: mscatter_v4i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v4i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret ; @@ -1773,7 +1773,7 @@ ; RV64ZVE32F-NEXT: ld a1, 24(a0) ; RV64ZVE32F-NEXT: ld a2, 16(a0) ; 
RV64ZVE32F-NEXT: ld a4, 8(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v0 ; RV64ZVE32F-NEXT: andi a5, a3, 1 ; RV64ZVE32F-NEXT: bnez a5, .LBB25_5 @@ -1790,24 +1790,24 @@ ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB25_5: # %cond.store ; RV64ZVE32F-NEXT: ld a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vse32.v v8, (a0) ; RV64ZVE32F-NEXT: andi a0, a3, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB25_2 ; RV64ZVE32F-NEXT: .LBB25_6: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vse32.v v9, (a4) ; RV64ZVE32F-NEXT: andi a0, a3, 4 ; RV64ZVE32F-NEXT: beqz a0, .LBB25_3 ; RV64ZVE32F-NEXT: .LBB25_7: # %cond.store3 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: vse32.v v9, (a2) ; RV64ZVE32F-NEXT: andi a0, a3, 8 ; RV64ZVE32F-NEXT: beqz a0, .LBB25_4 ; RV64ZVE32F-NEXT: .LBB25_8: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 3 ; RV64ZVE32F-NEXT: vse32.v v8, (a1) ; RV64ZVE32F-NEXT: ret @@ -1818,13 +1818,13 @@ define void @mscatter_truemask_v4i32(<4 x i32> %val, <4 x i32*> %ptrs) { ; RV32-LABEL: mscatter_truemask_v4i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_truemask_v4i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10 ; RV64-NEXT: ret ; @@ -1833,7 +1833,7 @@ ; RV64ZVE32F-NEXT: ld a1, 24(a0) ; RV64ZVE32F-NEXT: ld a2, 16(a0) ; RV64ZVE32F-NEXT: ld a4, 8(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vmset.m v9 ; RV64ZVE32F-NEXT: vmv.x.s a3, v9 ; RV64ZVE32F-NEXT: beqz zero, .LBB26_5 @@ -1850,24 +1850,24 @@ ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB26_5: # %cond.store ; RV64ZVE32F-NEXT: ld a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vse32.v v8, (a0) ; RV64ZVE32F-NEXT: andi a0, a3, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB26_2 ; RV64ZVE32F-NEXT: .LBB26_6: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vse32.v v9, (a4) ; RV64ZVE32F-NEXT: andi a0, a3, 4 ; RV64ZVE32F-NEXT: beqz a0, .LBB26_3 ; RV64ZVE32F-NEXT: .LBB26_7: # %cond.store3 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: vse32.v v9, (a2) ; RV64ZVE32F-NEXT: andi a0, a3, 8 ; RV64ZVE32F-NEXT: beqz a0, .LBB26_4 ; RV64ZVE32F-NEXT: .LBB26_8: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 3 ; RV64ZVE32F-NEXT: vse32.v v8, (a1) ; RV64ZVE32F-NEXT: ret @@ -1890,13 +1890,13 @@ define void @mscatter_v8i32(<8 x i32> %val, <8 x 
i32*> %ptrs, <8 x i1> %m) { ; RV32-LABEL: mscatter_v8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t ; RV64-NEXT: ret ; @@ -1909,7 +1909,7 @@ ; RV64ZVE32F-NEXT: ld a6, 24(a0) ; RV64ZVE32F-NEXT: ld a7, 16(a0) ; RV64ZVE32F-NEXT: ld t0, 8(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v0 ; RV64ZVE32F-NEXT: andi t1, a3, 1 ; RV64ZVE32F-NEXT: bnez t1, .LBB28_9 @@ -1938,48 +1938,48 @@ ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB28_9: # %cond.store ; RV64ZVE32F-NEXT: ld a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vse32.v v8, (a0) ; RV64ZVE32F-NEXT: andi a0, a3, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB28_2 ; RV64ZVE32F-NEXT: .LBB28_10: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vse32.v v10, (t0) ; RV64ZVE32F-NEXT: andi a0, a3, 4 ; RV64ZVE32F-NEXT: beqz a0, .LBB28_3 ; RV64ZVE32F-NEXT: .LBB28_11: # %cond.store3 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV64ZVE32F-NEXT: vse32.v v10, (a7) ; RV64ZVE32F-NEXT: andi a0, a3, 8 ; RV64ZVE32F-NEXT: beqz a0, .LBB28_4 ; RV64ZVE32F-NEXT: .LBB28_12: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV64ZVE32F-NEXT: vse32.v v10, (a6) ; RV64ZVE32F-NEXT: andi a0, a3, 16 ; RV64ZVE32F-NEXT: beqz a0, .LBB28_5 ; RV64ZVE32F-NEXT: .LBB28_13: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV64ZVE32F-NEXT: vse32.v v10, (a5) ; RV64ZVE32F-NEXT: andi a0, a3, 32 ; RV64ZVE32F-NEXT: beqz a0, .LBB28_6 ; RV64ZVE32F-NEXT: .LBB28_14: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV64ZVE32F-NEXT: vse32.v v10, (a4) ; RV64ZVE32F-NEXT: andi a0, a3, 64 ; RV64ZVE32F-NEXT: beqz a0, .LBB28_7 ; RV64ZVE32F-NEXT: .LBB28_15: # %cond.store11 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV64ZVE32F-NEXT: vse32.v v10, (a2) ; RV64ZVE32F-NEXT: andi a0, a3, -128 ; RV64ZVE32F-NEXT: beqz a0, .LBB28_8 ; RV64ZVE32F-NEXT: .LBB28_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV64ZVE32F-NEXT: vse32.v v8, (a1) ; RV64ZVE32F-NEXT: ret @@ -1990,7 +1990,7 @@ define void @mscatter_baseidx_v8i8_v8i32(<8 x i32> %val, i32* %base, <8 x i8> %idxs, <8 x i1> %m) { ; RV32-LABEL: mscatter_baseidx_v8i8_v8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v12, v10 ; RV32-NEXT: vsll.vi v10, v12, 2 ; 
RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t @@ -1998,16 +1998,16 @@ ; ; RV64-LABEL: mscatter_baseidx_v8i8_v8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v12, v10 ; RV64-NEXT: vsll.vi v12, v12, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_baseidx_v8i8_v8i32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB29_2 @@ -2015,22 +2015,22 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vse32.v v8, (a2) ; RV64ZVE32F-NEXT: .LBB29_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB29_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: .LBB29_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB29_6 @@ -2038,11 +2038,11 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: .LBB29_6: # %else4 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB29_13 @@ -2053,16 +2053,16 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB29_10 ; RV64ZVE32F-NEXT: .LBB29_9: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 5 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: .LBB29_10: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB29_15 @@ -2072,22 +2072,22 @@ ; RV64ZVE32F-NEXT: .LBB29_12: # %else14 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB29_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; 
RV64ZVE32F-NEXT: vslidedown.vi v11, v11, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 3 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB29_8 ; RV64ZVE32F-NEXT: .LBB29_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 32 @@ -2097,18 +2097,18 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 6 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB29_12 ; RV64ZVE32F-NEXT: .LBB29_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v10 ; RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV64ZVE32F-NEXT: vse32.v v8, (a0) ; RV64ZVE32F-NEXT: ret @@ -2120,7 +2120,7 @@ define void @mscatter_baseidx_sext_v8i8_v8i32(<8 x i32> %val, i32* %base, <8 x i8> %idxs, <8 x i1> %m) { ; RV32-LABEL: mscatter_baseidx_sext_v8i8_v8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v12, v10 ; RV32-NEXT: vsll.vi v10, v12, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t @@ -2128,16 +2128,16 @@ ; ; RV64-LABEL: mscatter_baseidx_sext_v8i8_v8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v12, v10 ; RV64-NEXT: vsll.vi v12, v12, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_baseidx_sext_v8i8_v8i32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB30_2 @@ -2145,22 +2145,22 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vse32.v v8, (a2) ; RV64ZVE32F-NEXT: .LBB30_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB30_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: 
add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: .LBB30_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB30_6 @@ -2168,11 +2168,11 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: .LBB30_6: # %else4 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB30_13 @@ -2183,16 +2183,16 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB30_10 ; RV64ZVE32F-NEXT: .LBB30_9: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 5 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: .LBB30_10: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB30_15 @@ -2202,22 +2202,22 @@ ; RV64ZVE32F-NEXT: .LBB30_12: # %else14 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB30_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v11, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 3 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB30_8 ; RV64ZVE32F-NEXT: .LBB30_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 32 @@ -2227,18 +2227,18 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 6 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB30_12 ; RV64ZVE32F-NEXT: .LBB30_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, 
e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v10 ; RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV64ZVE32F-NEXT: vse32.v v8, (a0) ; RV64ZVE32F-NEXT: ret @@ -2251,7 +2251,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i32(<8 x i32> %val, i32* %base, <8 x i8> %idxs, <8 x i1> %m) { ; RV32-LABEL: mscatter_baseidx_zext_v8i8_v8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vzext.vf4 v12, v10 ; RV32-NEXT: vsll.vi v10, v12, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t @@ -2259,16 +2259,16 @@ ; ; RV64-LABEL: mscatter_baseidx_zext_v8i8_v8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vzext.vf8 v12, v10 ; RV64-NEXT: vsll.vi v12, v12, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_baseidx_zext_v8i8_v8i32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB31_2 @@ -2277,23 +2277,23 @@ ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vse32.v v8, (a2) ; RV64ZVE32F-NEXT: .LBB31_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB31_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: .LBB31_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB31_6 @@ -2302,11 +2302,11 @@ ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: .LBB31_6: # %else4 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB31_13 @@ -2317,17 +2317,17 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB31_10 ; RV64ZVE32F-NEXT: .LBB31_9: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; 
RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 5 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: .LBB31_10: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB31_15 @@ -2337,24 +2337,24 @@ ; RV64ZVE32F-NEXT: .LBB31_12: # %else14 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB31_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v11, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 3 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB31_8 ; RV64ZVE32F-NEXT: .LBB31_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 32 @@ -2365,19 +2365,19 @@ ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 6 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB31_12 ; RV64ZVE32F-NEXT: .LBB31_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v10 ; RV64ZVE32F-NEXT: andi a1, a1, 255 ; RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV64ZVE32F-NEXT: vse32.v v8, (a0) ; RV64ZVE32F-NEXT: ret @@ -2390,7 +2390,7 @@ define void @mscatter_baseidx_v8i16_v8i32(<8 x i32> %val, i32* %base, <8 x i16> %idxs, <8 x i1> %m) { ; RV32-LABEL: mscatter_baseidx_v8i16_v8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf2 v12, v10 ; RV32-NEXT: vsll.vi v10, v12, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t @@ -2398,40 +2398,40 @@ ; ; RV64-LABEL: mscatter_baseidx_v8i16_v8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf4 v12, v10 ; RV64-NEXT: vsll.vi v12, v12, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_baseidx_v8i16_v8i32: ; 
RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB32_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.store -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vse32.v v8, (a2) ; RV64ZVE32F-NEXT: .LBB32_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB32_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: .LBB32_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB32_6 @@ -2439,11 +2439,11 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: .LBB32_6: # %else4 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB32_13 @@ -2454,16 +2454,16 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB32_10 ; RV64ZVE32F-NEXT: .LBB32_9: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 5 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: .LBB32_10: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB32_15 @@ -2473,22 +2473,22 @@ ; RV64ZVE32F-NEXT: .LBB32_12: # %else14 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB32_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v11, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 3 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; 
RV64ZVE32F-NEXT: beqz a2, .LBB32_8 ; RV64ZVE32F-NEXT: .LBB32_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 32 @@ -2498,18 +2498,18 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 6 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB32_12 ; RV64ZVE32F-NEXT: .LBB32_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v10 ; RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV64ZVE32F-NEXT: vse32.v v8, (a0) ; RV64ZVE32F-NEXT: ret @@ -2521,7 +2521,7 @@ define void @mscatter_baseidx_sext_v8i16_v8i32(<8 x i32> %val, i32* %base, <8 x i16> %idxs, <8 x i1> %m) { ; RV32-LABEL: mscatter_baseidx_sext_v8i16_v8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf2 v12, v10 ; RV32-NEXT: vsll.vi v10, v12, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t @@ -2529,40 +2529,40 @@ ; ; RV64-LABEL: mscatter_baseidx_sext_v8i16_v8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf4 v12, v10 ; RV64-NEXT: vsll.vi v12, v12, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_baseidx_sext_v8i16_v8i32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB33_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.store -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vse32.v v8, (a2) ; RV64ZVE32F-NEXT: .LBB33_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB33_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: 
.LBB33_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB33_6 @@ -2570,11 +2570,11 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: .LBB33_6: # %else4 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB33_13 @@ -2585,16 +2585,16 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB33_10 ; RV64ZVE32F-NEXT: .LBB33_9: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 5 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: .LBB33_10: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB33_15 @@ -2604,22 +2604,22 @@ ; RV64ZVE32F-NEXT: .LBB33_12: # %else14 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB33_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v11, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 3 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB33_8 ; RV64ZVE32F-NEXT: .LBB33_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 32 @@ -2629,18 +2629,18 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 6 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB33_12 ; RV64ZVE32F-NEXT: .LBB33_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v10 ; RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, 
ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV64ZVE32F-NEXT: vse32.v v8, (a0) ; RV64ZVE32F-NEXT: ret @@ -2653,7 +2653,7 @@ define void @mscatter_baseidx_zext_v8i16_v8i32(<8 x i32> %val, i32* %base, <8 x i16> %idxs, <8 x i1> %m) { ; RV32-LABEL: mscatter_baseidx_zext_v8i16_v8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vzext.vf2 v12, v10 ; RV32-NEXT: vsll.vi v10, v12, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t @@ -2661,44 +2661,44 @@ ; ; RV64-LABEL: mscatter_baseidx_zext_v8i16_v8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vzext.vf4 v12, v10 ; RV64-NEXT: vsll.vi v12, v12, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_baseidx_zext_v8i16_v8i32: ; RV64ZVE32F: # %bb.0: ; RV64ZVE32F-NEXT: lui a1, 16 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: addiw a1, a1, -1 ; RV64ZVE32F-NEXT: beqz a3, .LBB34_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.store -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v10 ; RV64ZVE32F-NEXT: and a3, a3, a1 ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vse32.v v8, (a3) ; RV64ZVE32F-NEXT: .LBB34_2: # %else ; RV64ZVE32F-NEXT: andi a3, a2, 2 ; RV64ZVE32F-NEXT: beqz a3, .LBB34_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v11 ; RV64ZVE32F-NEXT: and a3, a3, a1 ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1 ; RV64ZVE32F-NEXT: vse32.v v12, (a3) ; RV64ZVE32F-NEXT: .LBB34_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a3, a2, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 2 ; RV64ZVE32F-NEXT: beqz a3, .LBB34_6 @@ -2707,11 +2707,11 @@ ; RV64ZVE32F-NEXT: and a3, a3, a1 ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2 ; RV64ZVE32F-NEXT: vse32.v v12, (a3) ; RV64ZVE32F-NEXT: .LBB34_6: # %else4 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; RV64ZVE32F-NEXT: andi a3, a2, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 4 ; RV64ZVE32F-NEXT: bnez a3, .LBB34_13 @@ -2722,17 +2722,17 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 32 ; RV64ZVE32F-NEXT: beqz a3, .LBB34_10 ; RV64ZVE32F-NEXT: .LBB34_9: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 1 ; 
RV64ZVE32F-NEXT: vmv.x.s a3, v11 ; RV64ZVE32F-NEXT: and a3, a3, a1 ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 5 ; RV64ZVE32F-NEXT: vse32.v v12, (a3) ; RV64ZVE32F-NEXT: .LBB34_10: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a3, a2, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2 ; RV64ZVE32F-NEXT: bnez a3, .LBB34_15 @@ -2742,24 +2742,24 @@ ; RV64ZVE32F-NEXT: .LBB34_12: # %else14 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB34_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v11, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v11 ; RV64ZVE32F-NEXT: and a3, a3, a1 ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 3 ; RV64ZVE32F-NEXT: vse32.v v12, (a3) ; RV64ZVE32F-NEXT: andi a3, a2, 16 ; RV64ZVE32F-NEXT: beqz a3, .LBB34_8 ; RV64ZVE32F-NEXT: .LBB34_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v10 ; RV64ZVE32F-NEXT: and a3, a3, a1 ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4 ; RV64ZVE32F-NEXT: vse32.v v12, (a3) ; RV64ZVE32F-NEXT: andi a3, a2, 32 @@ -2770,19 +2770,19 @@ ; RV64ZVE32F-NEXT: and a3, a3, a1 ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 6 ; RV64ZVE32F-NEXT: vse32.v v12, (a3) ; RV64ZVE32F-NEXT: andi a2, a2, -128 ; RV64ZVE32F-NEXT: beqz a2, .LBB34_12 ; RV64ZVE32F-NEXT: .LBB34_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: and a1, a2, a1 ; RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV64ZVE32F-NEXT: vse32.v v8, (a0) ; RV64ZVE32F-NEXT: ret @@ -2795,49 +2795,49 @@ define void @mscatter_baseidx_v8i32(<8 x i32> %val, i32* %base, <8 x i32> %idxs, <8 x i1> %m) { ; RV32-LABEL: mscatter_baseidx_v8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsll.vi v10, v10, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_v8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf2 v12, v10 ; RV64-NEXT: vsll.vi v12, v12, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_baseidx_v8i32: ; RV64ZVE32F: # %bb.0: -; 
RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB35_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.store -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vse32.v v8, (a2) ; RV64ZVE32F-NEXT: .LBB35_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB35_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v12 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: .LBB35_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v10, 4 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB35_12 @@ -2851,16 +2851,16 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB35_9 ; RV64ZVE32F-NEXT: .LBB35_8: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v12, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV64ZVE32F-NEXT: vse32.v v10, (a2) ; RV64ZVE32F-NEXT: .LBB35_9: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v12, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB35_15 @@ -2873,28 +2873,28 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 2 ; RV64ZVE32F-NEXT: vse32.v v14, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: beqz a2, .LBB35_6 ; RV64ZVE32F-NEXT: .LBB35_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV64ZVE32F-NEXT: vse32.v v10, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB35_7 ; RV64ZVE32F-NEXT: .LBB35_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli 
zero, 0, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v12 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV64ZVE32F-NEXT: vse32.v v10, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 32 @@ -2904,18 +2904,18 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 6 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB35_11 ; RV64ZVE32F-NEXT: .LBB35_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v10 ; RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV64ZVE32F-NEXT: vse32.v v8, (a0) ; RV64ZVE32F-NEXT: ret @@ -2929,26 +2929,26 @@ define void @mscatter_v1i64(<1 x i64> %val, <1 x i64*> %ptrs, <1 x i1> %m) { ; RV32V-LABEL: mscatter_v1i64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_v1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret ; ; RV32ZVE32F-LABEL: mscatter_v1i64: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetvli a2, zero, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetvli a2, zero, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.v.i v9, 0 ; RV32ZVE32F-NEXT: vmerge.vim v9, v9, 1, v0 ; RV32ZVE32F-NEXT: vmv.x.s a2, v9 ; RV32ZVE32F-NEXT: andi a2, a2, 1 ; RV32ZVE32F-NEXT: beqz a2, .LBB36_2 ; RV32ZVE32F-NEXT: # %bb.1: # %cond.store -; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a2, v8 ; RV32ZVE32F-NEXT: sw a1, 4(a2) ; RV32ZVE32F-NEXT: sw a0, 0(a2) @@ -2957,7 +2957,7 @@ ; ; RV64ZVE32F-LABEL: mscatter_v1i64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetvli a2, zero, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetvli a2, zero, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.v.i v8, 0 ; RV64ZVE32F-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 @@ -2976,13 +2976,13 @@ define void @mscatter_v2i64(<2 x i64> %val, <2 x i64*> %ptrs, <2 x i1> %m) { ; RV32V-LABEL: mscatter_v2i64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32V-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret ; @@ -2990,7 +2990,7 @@ ; RV32ZVE32F: # %bb.0: ; RV32ZVE32F-NEXT: lw a2, 12(a0) ; RV32ZVE32F-NEXT: lw a1, 8(a0) -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a3, v0 ; RV32ZVE32F-NEXT: andi a4, a3, 1 ; RV32ZVE32F-NEXT: bnez a4, .LBB37_3 @@ -3002,14 +3002,14 @@ ; 
RV32ZVE32F-NEXT: .LBB37_3: # %cond.store ; RV32ZVE32F-NEXT: lw a4, 4(a0) ; RV32ZVE32F-NEXT: lw a0, 0(a0) -; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a5, v8 ; RV32ZVE32F-NEXT: sw a4, 4(a5) ; RV32ZVE32F-NEXT: sw a0, 0(a5) ; RV32ZVE32F-NEXT: andi a0, a3, 2 ; RV32ZVE32F-NEXT: beqz a0, .LBB37_2 ; RV32ZVE32F-NEXT: .LBB37_4: # %cond.store1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a0, v8 ; RV32ZVE32F-NEXT: sw a2, 4(a0) @@ -3018,7 +3018,7 @@ ; ; RV64ZVE32F-LABEL: mscatter_v2i64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a4, v0 ; RV64ZVE32F-NEXT: andi a5, a4, 1 ; RV64ZVE32F-NEXT: bnez a5, .LBB37_3 @@ -3043,13 +3043,13 @@ define void @mscatter_v4i64(<4 x i64> %val, <4 x i64*> %ptrs, <4 x i1> %m) { ; RV32V-LABEL: mscatter_v4i64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_v4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret ; @@ -3061,7 +3061,7 @@ ; RV32ZVE32F-NEXT: lw a4, 16(a0) ; RV32ZVE32F-NEXT: lw a7, 12(a0) ; RV32ZVE32F-NEXT: lw a6, 8(a0) -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a5, v0 ; RV32ZVE32F-NEXT: andi t0, a5, 1 ; RV32ZVE32F-NEXT: bnez t0, .LBB38_5 @@ -3079,14 +3079,14 @@ ; RV32ZVE32F-NEXT: .LBB38_5: # %cond.store ; RV32ZVE32F-NEXT: lw t0, 4(a0) ; RV32ZVE32F-NEXT: lw a0, 0(a0) -; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s t1, v8 ; RV32ZVE32F-NEXT: sw t0, 4(t1) ; RV32ZVE32F-NEXT: sw a0, 0(t1) ; RV32ZVE32F-NEXT: andi a0, a5, 2 ; RV32ZVE32F-NEXT: beqz a0, .LBB38_2 ; RV32ZVE32F-NEXT: .LBB38_6: # %cond.store1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a0, v9 ; RV32ZVE32F-NEXT: sw a7, 4(a0) @@ -3094,7 +3094,7 @@ ; RV32ZVE32F-NEXT: andi a0, a5, 4 ; RV32ZVE32F-NEXT: beqz a0, .LBB38_3 ; RV32ZVE32F-NEXT: .LBB38_7: # %cond.store3 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a0, v9 ; RV32ZVE32F-NEXT: sw a4, 0(a0) @@ -3102,7 +3102,7 @@ ; RV32ZVE32F-NEXT: andi a0, a5, 8 ; RV32ZVE32F-NEXT: beqz a0, .LBB38_4 ; RV32ZVE32F-NEXT: .LBB38_8: # %cond.store5 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a0, v8 ; RV32ZVE32F-NEXT: sw a2, 0(a0) @@ -3117,7 +3117,7 @@ ; RV64ZVE32F-NEXT: ld a3, 24(a0) ; RV64ZVE32F-NEXT: ld a5, 16(a0) ; RV64ZVE32F-NEXT: ld t0, 8(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a6, v0 ; RV64ZVE32F-NEXT: andi t1, a6, 1 ; RV64ZVE32F-NEXT: bnez t1, .LBB38_5 @@ -3156,13 +3156,13 @@ define void 
@mscatter_truemask_v4i64(<4 x i64> %val, <4 x i64*> %ptrs) { ; RV32V-LABEL: mscatter_truemask_v4i64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (zero), v10 ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_truemask_v4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10 ; RV64-NEXT: ret ; @@ -3174,7 +3174,7 @@ ; RV32ZVE32F-NEXT: lw a4, 16(a0) ; RV32ZVE32F-NEXT: lw a7, 12(a0) ; RV32ZVE32F-NEXT: lw a6, 8(a0) -; RV32ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV32ZVE32F-NEXT: vmset.m v9 ; RV32ZVE32F-NEXT: vmv.x.s a5, v9 ; RV32ZVE32F-NEXT: beqz zero, .LBB39_5 @@ -3192,14 +3192,14 @@ ; RV32ZVE32F-NEXT: .LBB39_5: # %cond.store ; RV32ZVE32F-NEXT: lw t0, 4(a0) ; RV32ZVE32F-NEXT: lw a0, 0(a0) -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s t1, v8 ; RV32ZVE32F-NEXT: sw t0, 4(t1) ; RV32ZVE32F-NEXT: sw a0, 0(t1) ; RV32ZVE32F-NEXT: andi a0, a5, 2 ; RV32ZVE32F-NEXT: beqz a0, .LBB39_2 ; RV32ZVE32F-NEXT: .LBB39_6: # %cond.store1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a0, v9 ; RV32ZVE32F-NEXT: sw a7, 4(a0) @@ -3207,7 +3207,7 @@ ; RV32ZVE32F-NEXT: andi a0, a5, 4 ; RV32ZVE32F-NEXT: beqz a0, .LBB39_3 ; RV32ZVE32F-NEXT: .LBB39_7: # %cond.store3 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a0, v9 ; RV32ZVE32F-NEXT: sw a4, 0(a0) @@ -3215,7 +3215,7 @@ ; RV32ZVE32F-NEXT: andi a0, a5, 8 ; RV32ZVE32F-NEXT: beqz a0, .LBB39_4 ; RV32ZVE32F-NEXT: .LBB39_8: # %cond.store5 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a0, v8 ; RV32ZVE32F-NEXT: sw a2, 0(a0) @@ -3230,7 +3230,7 @@ ; RV64ZVE32F-NEXT: ld a3, 24(a0) ; RV64ZVE32F-NEXT: ld a5, 16(a0) ; RV64ZVE32F-NEXT: ld t0, 8(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vmset.m v8 ; RV64ZVE32F-NEXT: vmv.x.s a6, v8 ; RV64ZVE32F-NEXT: beqz zero, .LBB39_5 @@ -3281,13 +3281,13 @@ define void @mscatter_v8i64(<8 x i64> %val, <8 x i64*> %ptrs, <8 x i1> %m) { ; RV32V-LABEL: mscatter_v8i64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (zero), v12, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t ; RV64-NEXT: ret ; @@ -3315,7 +3315,7 @@ ; RV32ZVE32F-NEXT: lw t5, 16(a0) ; RV32ZVE32F-NEXT: lw s0, 12(a0) ; RV32ZVE32F-NEXT: lw t6, 8(a0) -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a6, v0 ; RV32ZVE32F-NEXT: andi s1, a6, 1 ; RV32ZVE32F-NEXT: bnez s1, .LBB41_10 @@ -3341,7 +3341,7 @@ ; RV32ZVE32F-NEXT: andi a0, a6, -128 ; RV32ZVE32F-NEXT: beqz a0, .LBB41_9 ; RV32ZVE32F-NEXT: .LBB41_8: # %cond.store13 -; RV32ZVE32F-NEXT: vsetivli 
zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a0, v8 ; RV32ZVE32F-NEXT: sw a2, 0(a0) @@ -3355,14 +3355,14 @@ ; RV32ZVE32F-NEXT: .LBB41_10: # %cond.store ; RV32ZVE32F-NEXT: lw s1, 4(a0) ; RV32ZVE32F-NEXT: lw a0, 0(a0) -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s s2, v8 ; RV32ZVE32F-NEXT: sw s1, 4(s2) ; RV32ZVE32F-NEXT: sw a0, 0(s2) ; RV32ZVE32F-NEXT: andi a0, a6, 2 ; RV32ZVE32F-NEXT: beqz a0, .LBB41_2 ; RV32ZVE32F-NEXT: .LBB41_11: # %cond.store1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw s0, 4(a0) @@ -3370,7 +3370,7 @@ ; RV32ZVE32F-NEXT: andi a0, a6, 4 ; RV32ZVE32F-NEXT: beqz a0, .LBB41_3 ; RV32ZVE32F-NEXT: .LBB41_12: # %cond.store3 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw t5, 0(a0) @@ -3378,7 +3378,7 @@ ; RV32ZVE32F-NEXT: andi a0, a6, 8 ; RV32ZVE32F-NEXT: beqz a0, .LBB41_4 ; RV32ZVE32F-NEXT: .LBB41_13: # %cond.store5 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw t3, 0(a0) @@ -3386,7 +3386,7 @@ ; RV32ZVE32F-NEXT: andi a0, a6, 16 ; RV32ZVE32F-NEXT: beqz a0, .LBB41_5 ; RV32ZVE32F-NEXT: .LBB41_14: # %cond.store7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw t1, 0(a0) @@ -3394,7 +3394,7 @@ ; RV32ZVE32F-NEXT: andi a0, a6, 32 ; RV32ZVE32F-NEXT: beqz a0, .LBB41_6 ; RV32ZVE32F-NEXT: .LBB41_15: # %cond.store9 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw a7, 0(a0) @@ -3402,7 +3402,7 @@ ; RV32ZVE32F-NEXT: andi a0, a6, 64 ; RV32ZVE32F-NEXT: beqz a0, .LBB41_7 ; RV32ZVE32F-NEXT: .LBB41_16: # %cond.store11 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw a4, 0(a0) @@ -3435,7 +3435,7 @@ ; RV64ZVE32F-NEXT: ld t4, 24(a0) ; RV64ZVE32F-NEXT: ld t6, 16(a0) ; RV64ZVE32F-NEXT: ld s1, 8(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a7, v0 ; RV64ZVE32F-NEXT: andi s2, a7, 1 ; RV64ZVE32F-NEXT: bnez s2, .LBB41_10 @@ -3506,16 +3506,16 @@ define void @mscatter_baseidx_v8i8_v8i64(<8 x i64> %val, i64* %base, <8 x i8> %idxs, <8 x i1> %m) { ; RV32V-LABEL: mscatter_baseidx_v8i8_v8i64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vsext.vf4 v14, v12 ; RV32V-NEXT: vsll.vi v12, v14, 3 -; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_v8i8_v8i64: ; RV64: # 
%bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t @@ -3545,11 +3545,11 @@ ; RV32ZVE32F-NEXT: lw t5, 16(a0) ; RV32ZVE32F-NEXT: lw s0, 12(a0) ; RV32ZVE32F-NEXT: lw t6, 8(a0) -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vsext.vf4 v10, v8 ; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a1, v0 ; RV32ZVE32F-NEXT: andi s1, a1, 1 ; RV32ZVE32F-NEXT: bnez s1, .LBB42_10 @@ -3575,7 +3575,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, -128 ; RV32ZVE32F-NEXT: beqz a0, .LBB42_9 ; RV32ZVE32F-NEXT: .LBB42_8: # %cond.store13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a0, v8 ; RV32ZVE32F-NEXT: sw a3, 0(a0) @@ -3589,14 +3589,14 @@ ; RV32ZVE32F-NEXT: .LBB42_10: # %cond.store ; RV32ZVE32F-NEXT: lw s1, 4(a0) ; RV32ZVE32F-NEXT: lw a0, 0(a0) -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s s2, v8 ; RV32ZVE32F-NEXT: sw s1, 4(s2) ; RV32ZVE32F-NEXT: sw a0, 0(s2) ; RV32ZVE32F-NEXT: andi a0, a1, 2 ; RV32ZVE32F-NEXT: beqz a0, .LBB42_2 ; RV32ZVE32F-NEXT: .LBB42_11: # %cond.store1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw s0, 4(a0) @@ -3604,7 +3604,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 4 ; RV32ZVE32F-NEXT: beqz a0, .LBB42_3 ; RV32ZVE32F-NEXT: .LBB42_12: # %cond.store3 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw t5, 0(a0) @@ -3612,7 +3612,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 8 ; RV32ZVE32F-NEXT: beqz a0, .LBB42_4 ; RV32ZVE32F-NEXT: .LBB42_13: # %cond.store5 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw t3, 0(a0) @@ -3620,7 +3620,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 16 ; RV32ZVE32F-NEXT: beqz a0, .LBB42_5 ; RV32ZVE32F-NEXT: .LBB42_14: # %cond.store7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw t1, 0(a0) @@ -3628,7 +3628,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 32 ; RV32ZVE32F-NEXT: beqz a0, .LBB42_6 ; RV32ZVE32F-NEXT: .LBB42_15: # %cond.store9 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw a7, 0(a0) @@ -3636,7 +3636,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 64 ; RV32ZVE32F-NEXT: beqz a0, .LBB42_7 ; RV32ZVE32F-NEXT: .LBB42_16: # %cond.store11 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s a0, 
v10 ; RV32ZVE32F-NEXT: sw a5, 0(a0) @@ -3654,7 +3654,7 @@ ; RV64ZVE32F-NEXT: ld a7, 24(a0) ; RV64ZVE32F-NEXT: ld t0, 16(a0) ; RV64ZVE32F-NEXT: ld t1, 8(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a4, v0 ; RV64ZVE32F-NEXT: andi t2, a4, 1 ; RV64ZVE32F-NEXT: beqz t2, .LBB42_2 @@ -3668,14 +3668,14 @@ ; RV64ZVE32F-NEXT: andi a0, a4, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB42_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a0, v9 ; RV64ZVE32F-NEXT: slli a0, a0, 3 ; RV64ZVE32F-NEXT: add a0, a1, a0 ; RV64ZVE32F-NEXT: sd t1, 0(a0) ; RV64ZVE32F-NEXT: .LBB42_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a0, a4, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB42_6 @@ -3685,7 +3685,7 @@ ; RV64ZVE32F-NEXT: add a0, a1, a0 ; RV64ZVE32F-NEXT: sd t0, 0(a0) ; RV64ZVE32F-NEXT: .LBB42_6: # %else4 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a0, a4, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a0, .LBB42_13 @@ -3696,14 +3696,14 @@ ; RV64ZVE32F-NEXT: andi a0, a4, 32 ; RV64ZVE32F-NEXT: beqz a0, .LBB42_10 ; RV64ZVE32F-NEXT: .LBB42_9: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a0, v9 ; RV64ZVE32F-NEXT: slli a0, a0, 3 ; RV64ZVE32F-NEXT: add a0, a1, a0 ; RV64ZVE32F-NEXT: sd a5, 0(a0) ; RV64ZVE32F-NEXT: .LBB42_10: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a0, a4, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a0, .LBB42_15 @@ -3713,7 +3713,7 @@ ; RV64ZVE32F-NEXT: .LBB42_12: # %else14 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB42_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a0, v9 ; RV64ZVE32F-NEXT: slli a0, a0, 3 @@ -3722,7 +3722,7 @@ ; RV64ZVE32F-NEXT: andi a0, a4, 16 ; RV64ZVE32F-NEXT: beqz a0, .LBB42_8 ; RV64ZVE32F-NEXT: .LBB42_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a0, v8 ; RV64ZVE32F-NEXT: slli a0, a0, 3 ; RV64ZVE32F-NEXT: add a0, a1, a0 @@ -3738,7 +3738,7 @@ ; RV64ZVE32F-NEXT: andi a0, a4, -128 ; RV64ZVE32F-NEXT: beqz a0, .LBB42_12 ; RV64ZVE32F-NEXT: .LBB42_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a0, v8 ; RV64ZVE32F-NEXT: slli a0, a0, 3 @@ -3753,16 +3753,16 @@ define void @mscatter_baseidx_sext_v8i8_v8i64(<8 x i64> %val, i64* %base, <8 x i8> %idxs, <8 x i1> %m) { ; RV32V-LABEL: mscatter_baseidx_sext_v8i8_v8i64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vsext.vf4 v14, v12 ; RV32V-NEXT: vsll.vi v12, v14, 3 -; RV32V-NEXT: vsetvli zero, zero, e64, 
m4, ta, mu +; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_v8i8_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t @@ -3792,11 +3792,11 @@ ; RV32ZVE32F-NEXT: lw t5, 16(a0) ; RV32ZVE32F-NEXT: lw s0, 12(a0) ; RV32ZVE32F-NEXT: lw t6, 8(a0) -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vsext.vf4 v10, v8 ; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a1, v0 ; RV32ZVE32F-NEXT: andi s1, a1, 1 ; RV32ZVE32F-NEXT: bnez s1, .LBB43_10 @@ -3822,7 +3822,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, -128 ; RV32ZVE32F-NEXT: beqz a0, .LBB43_9 ; RV32ZVE32F-NEXT: .LBB43_8: # %cond.store13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a0, v8 ; RV32ZVE32F-NEXT: sw a3, 0(a0) @@ -3836,14 +3836,14 @@ ; RV32ZVE32F-NEXT: .LBB43_10: # %cond.store ; RV32ZVE32F-NEXT: lw s1, 4(a0) ; RV32ZVE32F-NEXT: lw a0, 0(a0) -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s s2, v8 ; RV32ZVE32F-NEXT: sw s1, 4(s2) ; RV32ZVE32F-NEXT: sw a0, 0(s2) ; RV32ZVE32F-NEXT: andi a0, a1, 2 ; RV32ZVE32F-NEXT: beqz a0, .LBB43_2 ; RV32ZVE32F-NEXT: .LBB43_11: # %cond.store1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw s0, 4(a0) @@ -3851,7 +3851,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 4 ; RV32ZVE32F-NEXT: beqz a0, .LBB43_3 ; RV32ZVE32F-NEXT: .LBB43_12: # %cond.store3 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw t5, 0(a0) @@ -3859,7 +3859,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 8 ; RV32ZVE32F-NEXT: beqz a0, .LBB43_4 ; RV32ZVE32F-NEXT: .LBB43_13: # %cond.store5 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw t3, 0(a0) @@ -3867,7 +3867,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 16 ; RV32ZVE32F-NEXT: beqz a0, .LBB43_5 ; RV32ZVE32F-NEXT: .LBB43_14: # %cond.store7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw t1, 0(a0) @@ -3875,7 +3875,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 32 ; RV32ZVE32F-NEXT: beqz a0, .LBB43_6 ; RV32ZVE32F-NEXT: .LBB43_15: # %cond.store9 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw a7, 0(a0) @@ -3883,7 +3883,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 64 ; RV32ZVE32F-NEXT: beqz a0, .LBB43_7 ; RV32ZVE32F-NEXT: .LBB43_16: # %cond.store11 
-; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw a5, 0(a0) @@ -3901,7 +3901,7 @@ ; RV64ZVE32F-NEXT: ld a7, 24(a0) ; RV64ZVE32F-NEXT: ld t0, 16(a0) ; RV64ZVE32F-NEXT: ld t1, 8(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a4, v0 ; RV64ZVE32F-NEXT: andi t2, a4, 1 ; RV64ZVE32F-NEXT: beqz t2, .LBB43_2 @@ -3915,14 +3915,14 @@ ; RV64ZVE32F-NEXT: andi a0, a4, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB43_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a0, v9 ; RV64ZVE32F-NEXT: slli a0, a0, 3 ; RV64ZVE32F-NEXT: add a0, a1, a0 ; RV64ZVE32F-NEXT: sd t1, 0(a0) ; RV64ZVE32F-NEXT: .LBB43_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a0, a4, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB43_6 @@ -3932,7 +3932,7 @@ ; RV64ZVE32F-NEXT: add a0, a1, a0 ; RV64ZVE32F-NEXT: sd t0, 0(a0) ; RV64ZVE32F-NEXT: .LBB43_6: # %else4 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a0, a4, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a0, .LBB43_13 @@ -3943,14 +3943,14 @@ ; RV64ZVE32F-NEXT: andi a0, a4, 32 ; RV64ZVE32F-NEXT: beqz a0, .LBB43_10 ; RV64ZVE32F-NEXT: .LBB43_9: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a0, v9 ; RV64ZVE32F-NEXT: slli a0, a0, 3 ; RV64ZVE32F-NEXT: add a0, a1, a0 ; RV64ZVE32F-NEXT: sd a5, 0(a0) ; RV64ZVE32F-NEXT: .LBB43_10: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a0, a4, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a0, .LBB43_15 @@ -3960,7 +3960,7 @@ ; RV64ZVE32F-NEXT: .LBB43_12: # %else14 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB43_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a0, v9 ; RV64ZVE32F-NEXT: slli a0, a0, 3 @@ -3969,7 +3969,7 @@ ; RV64ZVE32F-NEXT: andi a0, a4, 16 ; RV64ZVE32F-NEXT: beqz a0, .LBB43_8 ; RV64ZVE32F-NEXT: .LBB43_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a0, v8 ; RV64ZVE32F-NEXT: slli a0, a0, 3 ; RV64ZVE32F-NEXT: add a0, a1, a0 @@ -3985,7 +3985,7 @@ ; RV64ZVE32F-NEXT: andi a0, a4, -128 ; RV64ZVE32F-NEXT: beqz a0, .LBB43_12 ; RV64ZVE32F-NEXT: .LBB43_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a0, v8 ; RV64ZVE32F-NEXT: slli a0, a0, 3 @@ -4001,16 +4001,16 @@ define void @mscatter_baseidx_zext_v8i8_v8i64(<8 x i64> %val, i64* %base, <8 x i8> %idxs, <8 x i1> %m) { ; RV32V-LABEL: mscatter_baseidx_zext_v8i8_v8i64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli 
zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vzext.vf4 v14, v12 ; RV32V-NEXT: vsll.vi v12, v14, 3 -; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_zext_v8i8_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vzext.vf8 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t @@ -4040,11 +4040,11 @@ ; RV32ZVE32F-NEXT: lw t5, 16(a0) ; RV32ZVE32F-NEXT: lw s0, 12(a0) ; RV32ZVE32F-NEXT: lw t6, 8(a0) -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vzext.vf4 v10, v8 ; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a1, v0 ; RV32ZVE32F-NEXT: andi s1, a1, 1 ; RV32ZVE32F-NEXT: bnez s1, .LBB44_10 @@ -4070,7 +4070,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, -128 ; RV32ZVE32F-NEXT: beqz a0, .LBB44_9 ; RV32ZVE32F-NEXT: .LBB44_8: # %cond.store13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a0, v8 ; RV32ZVE32F-NEXT: sw a3, 0(a0) @@ -4084,14 +4084,14 @@ ; RV32ZVE32F-NEXT: .LBB44_10: # %cond.store ; RV32ZVE32F-NEXT: lw s1, 4(a0) ; RV32ZVE32F-NEXT: lw a0, 0(a0) -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s s2, v8 ; RV32ZVE32F-NEXT: sw s1, 4(s2) ; RV32ZVE32F-NEXT: sw a0, 0(s2) ; RV32ZVE32F-NEXT: andi a0, a1, 2 ; RV32ZVE32F-NEXT: beqz a0, .LBB44_2 ; RV32ZVE32F-NEXT: .LBB44_11: # %cond.store1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw s0, 4(a0) @@ -4099,7 +4099,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 4 ; RV32ZVE32F-NEXT: beqz a0, .LBB44_3 ; RV32ZVE32F-NEXT: .LBB44_12: # %cond.store3 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw t5, 0(a0) @@ -4107,7 +4107,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 8 ; RV32ZVE32F-NEXT: beqz a0, .LBB44_4 ; RV32ZVE32F-NEXT: .LBB44_13: # %cond.store5 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw t3, 0(a0) @@ -4115,7 +4115,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 16 ; RV32ZVE32F-NEXT: beqz a0, .LBB44_5 ; RV32ZVE32F-NEXT: .LBB44_14: # %cond.store7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw t1, 0(a0) @@ -4123,7 +4123,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 32 ; RV32ZVE32F-NEXT: beqz a0, .LBB44_6 ; RV32ZVE32F-NEXT: .LBB44_15: # %cond.store9 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: 
vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw a7, 0(a0) @@ -4131,7 +4131,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 64 ; RV32ZVE32F-NEXT: beqz a0, .LBB44_7 ; RV32ZVE32F-NEXT: .LBB44_16: # %cond.store11 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw a5, 0(a0) @@ -4149,7 +4149,7 @@ ; RV64ZVE32F-NEXT: ld a7, 24(a0) ; RV64ZVE32F-NEXT: ld t0, 16(a0) ; RV64ZVE32F-NEXT: ld t1, 8(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a4, v0 ; RV64ZVE32F-NEXT: andi t2, a4, 1 ; RV64ZVE32F-NEXT: beqz t2, .LBB44_2 @@ -4164,7 +4164,7 @@ ; RV64ZVE32F-NEXT: andi a0, a4, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB44_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a0, v9 ; RV64ZVE32F-NEXT: andi a0, a0, 255 @@ -4172,7 +4172,7 @@ ; RV64ZVE32F-NEXT: add a0, a1, a0 ; RV64ZVE32F-NEXT: sd t1, 0(a0) ; RV64ZVE32F-NEXT: .LBB44_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a0, a4, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB44_6 @@ -4183,7 +4183,7 @@ ; RV64ZVE32F-NEXT: add a0, a1, a0 ; RV64ZVE32F-NEXT: sd t0, 0(a0) ; RV64ZVE32F-NEXT: .LBB44_6: # %else4 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a0, a4, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a0, .LBB44_13 @@ -4194,7 +4194,7 @@ ; RV64ZVE32F-NEXT: andi a0, a4, 32 ; RV64ZVE32F-NEXT: beqz a0, .LBB44_10 ; RV64ZVE32F-NEXT: .LBB44_9: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a0, v9 ; RV64ZVE32F-NEXT: andi a0, a0, 255 @@ -4202,7 +4202,7 @@ ; RV64ZVE32F-NEXT: add a0, a1, a0 ; RV64ZVE32F-NEXT: sd a5, 0(a0) ; RV64ZVE32F-NEXT: .LBB44_10: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a0, a4, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a0, .LBB44_15 @@ -4212,7 +4212,7 @@ ; RV64ZVE32F-NEXT: .LBB44_12: # %else14 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB44_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a0, v9 ; RV64ZVE32F-NEXT: andi a0, a0, 255 @@ -4222,7 +4222,7 @@ ; RV64ZVE32F-NEXT: andi a0, a4, 16 ; RV64ZVE32F-NEXT: beqz a0, .LBB44_8 ; RV64ZVE32F-NEXT: .LBB44_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a0, v8 ; RV64ZVE32F-NEXT: andi a0, a0, 255 ; RV64ZVE32F-NEXT: slli a0, a0, 3 @@ -4240,7 +4240,7 @@ ; RV64ZVE32F-NEXT: andi a0, a4, -128 ; RV64ZVE32F-NEXT: beqz a0, .LBB44_12 ; RV64ZVE32F-NEXT: .LBB44_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a0, v8 ; RV64ZVE32F-NEXT: 
andi a0, a0, 255 @@ -4257,16 +4257,16 @@ define void @mscatter_baseidx_v8i16_v8i64(<8 x i64> %val, i64* %base, <8 x i16> %idxs, <8 x i1> %m) { ; RV32V-LABEL: mscatter_baseidx_v8i16_v8i64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vsext.vf2 v14, v12 ; RV32V-NEXT: vsll.vi v12, v14, 3 -; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_v8i16_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf4 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t @@ -4296,11 +4296,11 @@ ; RV32ZVE32F-NEXT: lw t5, 16(a0) ; RV32ZVE32F-NEXT: lw s0, 12(a0) ; RV32ZVE32F-NEXT: lw t6, 8(a0) -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vsext.vf2 v10, v8 ; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a1, v0 ; RV32ZVE32F-NEXT: andi s1, a1, 1 ; RV32ZVE32F-NEXT: bnez s1, .LBB45_10 @@ -4326,7 +4326,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, -128 ; RV32ZVE32F-NEXT: beqz a0, .LBB45_9 ; RV32ZVE32F-NEXT: .LBB45_8: # %cond.store13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a0, v8 ; RV32ZVE32F-NEXT: sw a3, 0(a0) @@ -4340,14 +4340,14 @@ ; RV32ZVE32F-NEXT: .LBB45_10: # %cond.store ; RV32ZVE32F-NEXT: lw s1, 4(a0) ; RV32ZVE32F-NEXT: lw a0, 0(a0) -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s s2, v8 ; RV32ZVE32F-NEXT: sw s1, 4(s2) ; RV32ZVE32F-NEXT: sw a0, 0(s2) ; RV32ZVE32F-NEXT: andi a0, a1, 2 ; RV32ZVE32F-NEXT: beqz a0, .LBB45_2 ; RV32ZVE32F-NEXT: .LBB45_11: # %cond.store1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw s0, 4(a0) @@ -4355,7 +4355,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 4 ; RV32ZVE32F-NEXT: beqz a0, .LBB45_3 ; RV32ZVE32F-NEXT: .LBB45_12: # %cond.store3 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw t5, 0(a0) @@ -4363,7 +4363,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 8 ; RV32ZVE32F-NEXT: beqz a0, .LBB45_4 ; RV32ZVE32F-NEXT: .LBB45_13: # %cond.store5 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw t3, 0(a0) @@ -4371,7 +4371,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 16 ; RV32ZVE32F-NEXT: beqz a0, .LBB45_5 ; RV32ZVE32F-NEXT: .LBB45_14: # %cond.store7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw t1, 0(a0) @@ -4379,7 +4379,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 32 ; RV32ZVE32F-NEXT: beqz a0, 
.LBB45_6 ; RV32ZVE32F-NEXT: .LBB45_15: # %cond.store9 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw a7, 0(a0) @@ -4387,7 +4387,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 64 ; RV32ZVE32F-NEXT: beqz a0, .LBB45_7 ; RV32ZVE32F-NEXT: .LBB45_16: # %cond.store11 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw a5, 0(a0) @@ -4405,13 +4405,13 @@ ; RV64ZVE32F-NEXT: ld a7, 24(a0) ; RV64ZVE32F-NEXT: ld t0, 16(a0) ; RV64ZVE32F-NEXT: ld t1, 8(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a4, v0 ; RV64ZVE32F-NEXT: andi t2, a4, 1 ; RV64ZVE32F-NEXT: beqz t2, .LBB45_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.store ; RV64ZVE32F-NEXT: ld a0, 0(a0) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s t2, v8 ; RV64ZVE32F-NEXT: slli t2, t2, 3 ; RV64ZVE32F-NEXT: add t2, a1, t2 @@ -4420,14 +4420,14 @@ ; RV64ZVE32F-NEXT: andi a0, a4, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB45_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a0, v9 ; RV64ZVE32F-NEXT: slli a0, a0, 3 ; RV64ZVE32F-NEXT: add a0, a1, a0 ; RV64ZVE32F-NEXT: sd t1, 0(a0) ; RV64ZVE32F-NEXT: .LBB45_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a0, a4, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB45_6 @@ -4437,7 +4437,7 @@ ; RV64ZVE32F-NEXT: add a0, a1, a0 ; RV64ZVE32F-NEXT: sd t0, 0(a0) ; RV64ZVE32F-NEXT: .LBB45_6: # %else4 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; RV64ZVE32F-NEXT: andi a0, a4, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a0, .LBB45_13 @@ -4448,14 +4448,14 @@ ; RV64ZVE32F-NEXT: andi a0, a4, 32 ; RV64ZVE32F-NEXT: beqz a0, .LBB45_10 ; RV64ZVE32F-NEXT: .LBB45_9: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a0, v9 ; RV64ZVE32F-NEXT: slli a0, a0, 3 ; RV64ZVE32F-NEXT: add a0, a1, a0 ; RV64ZVE32F-NEXT: sd a5, 0(a0) ; RV64ZVE32F-NEXT: .LBB45_10: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a0, a4, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a0, .LBB45_15 @@ -4465,7 +4465,7 @@ ; RV64ZVE32F-NEXT: .LBB45_12: # %else14 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB45_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a0, v9 ; RV64ZVE32F-NEXT: slli a0, a0, 3 @@ -4474,7 +4474,7 @@ ; RV64ZVE32F-NEXT: andi a0, a4, 16 ; RV64ZVE32F-NEXT: beqz a0, .LBB45_8 ; RV64ZVE32F-NEXT: .LBB45_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: 
vsetivli zero, 0, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a0, v8 ; RV64ZVE32F-NEXT: slli a0, a0, 3 ; RV64ZVE32F-NEXT: add a0, a1, a0 @@ -4490,7 +4490,7 @@ ; RV64ZVE32F-NEXT: andi a0, a4, -128 ; RV64ZVE32F-NEXT: beqz a0, .LBB45_12 ; RV64ZVE32F-NEXT: .LBB45_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a0, v8 ; RV64ZVE32F-NEXT: slli a0, a0, 3 @@ -4505,16 +4505,16 @@ define void @mscatter_baseidx_sext_v8i16_v8i64(<8 x i64> %val, i64* %base, <8 x i16> %idxs, <8 x i1> %m) { ; RV32V-LABEL: mscatter_baseidx_sext_v8i16_v8i64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vsext.vf2 v14, v12 ; RV32V-NEXT: vsll.vi v12, v14, 3 -; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_v8i16_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf4 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t @@ -4544,11 +4544,11 @@ ; RV32ZVE32F-NEXT: lw t5, 16(a0) ; RV32ZVE32F-NEXT: lw s0, 12(a0) ; RV32ZVE32F-NEXT: lw t6, 8(a0) -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vsext.vf2 v10, v8 ; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a1, v0 ; RV32ZVE32F-NEXT: andi s1, a1, 1 ; RV32ZVE32F-NEXT: bnez s1, .LBB46_10 @@ -4574,7 +4574,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, -128 ; RV32ZVE32F-NEXT: beqz a0, .LBB46_9 ; RV32ZVE32F-NEXT: .LBB46_8: # %cond.store13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a0, v8 ; RV32ZVE32F-NEXT: sw a3, 0(a0) @@ -4588,14 +4588,14 @@ ; RV32ZVE32F-NEXT: .LBB46_10: # %cond.store ; RV32ZVE32F-NEXT: lw s1, 4(a0) ; RV32ZVE32F-NEXT: lw a0, 0(a0) -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s s2, v8 ; RV32ZVE32F-NEXT: sw s1, 4(s2) ; RV32ZVE32F-NEXT: sw a0, 0(s2) ; RV32ZVE32F-NEXT: andi a0, a1, 2 ; RV32ZVE32F-NEXT: beqz a0, .LBB46_2 ; RV32ZVE32F-NEXT: .LBB46_11: # %cond.store1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw s0, 4(a0) @@ -4603,7 +4603,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 4 ; RV32ZVE32F-NEXT: beqz a0, .LBB46_3 ; RV32ZVE32F-NEXT: .LBB46_12: # %cond.store3 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw t5, 0(a0) @@ -4611,7 +4611,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 8 ; RV32ZVE32F-NEXT: beqz a0, .LBB46_4 ; RV32ZVE32F-NEXT: .LBB46_13: # %cond.store5 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; 
RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw t3, 0(a0) @@ -4619,7 +4619,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 16 ; RV32ZVE32F-NEXT: beqz a0, .LBB46_5 ; RV32ZVE32F-NEXT: .LBB46_14: # %cond.store7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw t1, 0(a0) @@ -4627,7 +4627,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 32 ; RV32ZVE32F-NEXT: beqz a0, .LBB46_6 ; RV32ZVE32F-NEXT: .LBB46_15: # %cond.store9 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw a7, 0(a0) @@ -4635,7 +4635,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 64 ; RV32ZVE32F-NEXT: beqz a0, .LBB46_7 ; RV32ZVE32F-NEXT: .LBB46_16: # %cond.store11 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw a5, 0(a0) @@ -4653,13 +4653,13 @@ ; RV64ZVE32F-NEXT: ld a7, 24(a0) ; RV64ZVE32F-NEXT: ld t0, 16(a0) ; RV64ZVE32F-NEXT: ld t1, 8(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a4, v0 ; RV64ZVE32F-NEXT: andi t2, a4, 1 ; RV64ZVE32F-NEXT: beqz t2, .LBB46_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.store ; RV64ZVE32F-NEXT: ld a0, 0(a0) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s t2, v8 ; RV64ZVE32F-NEXT: slli t2, t2, 3 ; RV64ZVE32F-NEXT: add t2, a1, t2 @@ -4668,14 +4668,14 @@ ; RV64ZVE32F-NEXT: andi a0, a4, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB46_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a0, v9 ; RV64ZVE32F-NEXT: slli a0, a0, 3 ; RV64ZVE32F-NEXT: add a0, a1, a0 ; RV64ZVE32F-NEXT: sd t1, 0(a0) ; RV64ZVE32F-NEXT: .LBB46_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a0, a4, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB46_6 @@ -4685,7 +4685,7 @@ ; RV64ZVE32F-NEXT: add a0, a1, a0 ; RV64ZVE32F-NEXT: sd t0, 0(a0) ; RV64ZVE32F-NEXT: .LBB46_6: # %else4 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; RV64ZVE32F-NEXT: andi a0, a4, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a0, .LBB46_13 @@ -4696,14 +4696,14 @@ ; RV64ZVE32F-NEXT: andi a0, a4, 32 ; RV64ZVE32F-NEXT: beqz a0, .LBB46_10 ; RV64ZVE32F-NEXT: .LBB46_9: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a0, v9 ; RV64ZVE32F-NEXT: slli a0, a0, 3 ; RV64ZVE32F-NEXT: add a0, a1, a0 ; RV64ZVE32F-NEXT: sd a5, 0(a0) ; RV64ZVE32F-NEXT: .LBB46_10: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a0, a4, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a0, .LBB46_15 @@ -4713,7 +4713,7 @@ ; RV64ZVE32F-NEXT: .LBB46_12: # 
%else14 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB46_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a0, v9 ; RV64ZVE32F-NEXT: slli a0, a0, 3 @@ -4722,7 +4722,7 @@ ; RV64ZVE32F-NEXT: andi a0, a4, 16 ; RV64ZVE32F-NEXT: beqz a0, .LBB46_8 ; RV64ZVE32F-NEXT: .LBB46_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a0, v8 ; RV64ZVE32F-NEXT: slli a0, a0, 3 ; RV64ZVE32F-NEXT: add a0, a1, a0 @@ -4738,7 +4738,7 @@ ; RV64ZVE32F-NEXT: andi a0, a4, -128 ; RV64ZVE32F-NEXT: beqz a0, .LBB46_12 ; RV64ZVE32F-NEXT: .LBB46_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a0, v8 ; RV64ZVE32F-NEXT: slli a0, a0, 3 @@ -4754,16 +4754,16 @@ define void @mscatter_baseidx_zext_v8i16_v8i64(<8 x i64> %val, i64* %base, <8 x i16> %idxs, <8 x i1> %m) { ; RV32V-LABEL: mscatter_baseidx_zext_v8i16_v8i64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vzext.vf2 v14, v12 ; RV32V-NEXT: vsll.vi v12, v14, 3 -; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_zext_v8i16_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vzext.vf4 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t @@ -4793,11 +4793,11 @@ ; RV32ZVE32F-NEXT: lw t5, 16(a0) ; RV32ZVE32F-NEXT: lw s0, 12(a0) ; RV32ZVE32F-NEXT: lw t6, 8(a0) -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vzext.vf2 v10, v8 ; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a1, v0 ; RV32ZVE32F-NEXT: andi s1, a1, 1 ; RV32ZVE32F-NEXT: bnez s1, .LBB47_10 @@ -4823,7 +4823,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, -128 ; RV32ZVE32F-NEXT: beqz a0, .LBB47_9 ; RV32ZVE32F-NEXT: .LBB47_8: # %cond.store13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a0, v8 ; RV32ZVE32F-NEXT: sw a3, 0(a0) @@ -4837,14 +4837,14 @@ ; RV32ZVE32F-NEXT: .LBB47_10: # %cond.store ; RV32ZVE32F-NEXT: lw s1, 4(a0) ; RV32ZVE32F-NEXT: lw a0, 0(a0) -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s s2, v8 ; RV32ZVE32F-NEXT: sw s1, 4(s2) ; RV32ZVE32F-NEXT: sw a0, 0(s2) ; RV32ZVE32F-NEXT: andi a0, a1, 2 ; RV32ZVE32F-NEXT: beqz a0, .LBB47_2 ; RV32ZVE32F-NEXT: .LBB47_11: # %cond.store1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw s0, 4(a0) @@ -4852,7 +4852,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 4 ; RV32ZVE32F-NEXT: beqz a0, .LBB47_3 ; RV32ZVE32F-NEXT: .LBB47_12: # %cond.store3 -; 
RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw t5, 0(a0) @@ -4860,7 +4860,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 8 ; RV32ZVE32F-NEXT: beqz a0, .LBB47_4 ; RV32ZVE32F-NEXT: .LBB47_13: # %cond.store5 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw t3, 0(a0) @@ -4868,7 +4868,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 16 ; RV32ZVE32F-NEXT: beqz a0, .LBB47_5 ; RV32ZVE32F-NEXT: .LBB47_14: # %cond.store7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw t1, 0(a0) @@ -4876,7 +4876,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 32 ; RV32ZVE32F-NEXT: beqz a0, .LBB47_6 ; RV32ZVE32F-NEXT: .LBB47_15: # %cond.store9 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw a7, 0(a0) @@ -4884,7 +4884,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 64 ; RV32ZVE32F-NEXT: beqz a0, .LBB47_7 ; RV32ZVE32F-NEXT: .LBB47_16: # %cond.store11 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw a5, 0(a0) @@ -4903,14 +4903,14 @@ ; RV64ZVE32F-NEXT: ld t1, 16(a0) ; RV64ZVE32F-NEXT: ld t2, 8(a0) ; RV64ZVE32F-NEXT: lui a4, 16 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a5, v0 ; RV64ZVE32F-NEXT: andi t3, a5, 1 ; RV64ZVE32F-NEXT: addiw a4, a4, -1 ; RV64ZVE32F-NEXT: beqz t3, .LBB47_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.store ; RV64ZVE32F-NEXT: ld a0, 0(a0) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s t3, v8 ; RV64ZVE32F-NEXT: and t3, t3, a4 ; RV64ZVE32F-NEXT: slli t3, t3, 3 @@ -4920,7 +4920,7 @@ ; RV64ZVE32F-NEXT: andi a0, a5, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB47_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a0, v9 ; RV64ZVE32F-NEXT: and a0, a0, a4 @@ -4928,7 +4928,7 @@ ; RV64ZVE32F-NEXT: add a0, a1, a0 ; RV64ZVE32F-NEXT: sd t2, 0(a0) ; RV64ZVE32F-NEXT: .LBB47_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a0, a5, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB47_6 @@ -4939,7 +4939,7 @@ ; RV64ZVE32F-NEXT: add a0, a1, a0 ; RV64ZVE32F-NEXT: sd t1, 0(a0) ; RV64ZVE32F-NEXT: .LBB47_6: # %else4 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; RV64ZVE32F-NEXT: andi a0, a5, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a0, .LBB47_13 @@ -4950,7 +4950,7 @@ ; RV64ZVE32F-NEXT: andi a0, a5, 32 ; RV64ZVE32F-NEXT: beqz a0, .LBB47_10 ; RV64ZVE32F-NEXT: .LBB47_9: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, 
mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a0, v9 ; RV64ZVE32F-NEXT: and a0, a0, a4 @@ -4958,7 +4958,7 @@ ; RV64ZVE32F-NEXT: add a0, a1, a0 ; RV64ZVE32F-NEXT: sd a6, 0(a0) ; RV64ZVE32F-NEXT: .LBB47_10: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a0, a5, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a0, .LBB47_15 @@ -4968,7 +4968,7 @@ ; RV64ZVE32F-NEXT: .LBB47_12: # %else14 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB47_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a0, v9 ; RV64ZVE32F-NEXT: and a0, a0, a4 @@ -4978,7 +4978,7 @@ ; RV64ZVE32F-NEXT: andi a0, a5, 16 ; RV64ZVE32F-NEXT: beqz a0, .LBB47_8 ; RV64ZVE32F-NEXT: .LBB47_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a0, v8 ; RV64ZVE32F-NEXT: and a0, a0, a4 ; RV64ZVE32F-NEXT: slli a0, a0, 3 @@ -4996,7 +4996,7 @@ ; RV64ZVE32F-NEXT: andi a0, a5, -128 ; RV64ZVE32F-NEXT: beqz a0, .LBB47_12 ; RV64ZVE32F-NEXT: .LBB47_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a0, v8 ; RV64ZVE32F-NEXT: and a0, a0, a4 @@ -5013,15 +5013,15 @@ define void @mscatter_baseidx_v8i32_v8i64(<8 x i64> %val, i64* %base, <8 x i32> %idxs, <8 x i1> %m) { ; RV32V-LABEL: mscatter_baseidx_v8i32_v8i64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vsll.vi v12, v12, 3 -; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_v8i32_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf2 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t @@ -5051,10 +5051,10 @@ ; RV32ZVE32F-NEXT: lw t5, 16(a0) ; RV32ZVE32F-NEXT: lw s0, 12(a0) ; RV32ZVE32F-NEXT: lw t6, 8(a0) -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a1, v0 ; RV32ZVE32F-NEXT: andi s1, a1, 1 ; RV32ZVE32F-NEXT: bnez s1, .LBB48_10 @@ -5080,7 +5080,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, -128 ; RV32ZVE32F-NEXT: beqz a0, .LBB48_9 ; RV32ZVE32F-NEXT: .LBB48_8: # %cond.store13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a0, v8 ; RV32ZVE32F-NEXT: sw a3, 0(a0) @@ -5094,14 +5094,14 @@ ; RV32ZVE32F-NEXT: .LBB48_10: # %cond.store ; RV32ZVE32F-NEXT: lw s1, 4(a0) ; RV32ZVE32F-NEXT: lw a0, 0(a0) -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s s2, v8 ; RV32ZVE32F-NEXT: sw s1, 4(s2) ; RV32ZVE32F-NEXT: sw a0, 
0(s2) ; RV32ZVE32F-NEXT: andi a0, a1, 2 ; RV32ZVE32F-NEXT: beqz a0, .LBB48_2 ; RV32ZVE32F-NEXT: .LBB48_11: # %cond.store1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw s0, 4(a0) @@ -5109,7 +5109,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 4 ; RV32ZVE32F-NEXT: beqz a0, .LBB48_3 ; RV32ZVE32F-NEXT: .LBB48_12: # %cond.store3 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw t5, 0(a0) @@ -5117,7 +5117,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 8 ; RV32ZVE32F-NEXT: beqz a0, .LBB48_4 ; RV32ZVE32F-NEXT: .LBB48_13: # %cond.store5 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw t3, 0(a0) @@ -5125,7 +5125,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 16 ; RV32ZVE32F-NEXT: beqz a0, .LBB48_5 ; RV32ZVE32F-NEXT: .LBB48_14: # %cond.store7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw t1, 0(a0) @@ -5133,7 +5133,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 32 ; RV32ZVE32F-NEXT: beqz a0, .LBB48_6 ; RV32ZVE32F-NEXT: .LBB48_15: # %cond.store9 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw a7, 0(a0) @@ -5141,7 +5141,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 64 ; RV32ZVE32F-NEXT: beqz a0, .LBB48_7 ; RV32ZVE32F-NEXT: .LBB48_16: # %cond.store11 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw a5, 0(a0) @@ -5159,13 +5159,13 @@ ; RV64ZVE32F-NEXT: ld a7, 24(a0) ; RV64ZVE32F-NEXT: ld t0, 16(a0) ; RV64ZVE32F-NEXT: ld t1, 8(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a4, v0 ; RV64ZVE32F-NEXT: andi t2, a4, 1 ; RV64ZVE32F-NEXT: beqz t2, .LBB48_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.store ; RV64ZVE32F-NEXT: ld a0, 0(a0) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s t2, v8 ; RV64ZVE32F-NEXT: slli t2, t2, 3 ; RV64ZVE32F-NEXT: add t2, a1, t2 @@ -5174,16 +5174,16 @@ ; RV64ZVE32F-NEXT: andi a0, a4, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB48_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a0, v10 ; RV64ZVE32F-NEXT: slli a0, a0, 3 ; RV64ZVE32F-NEXT: add a0, a1, a0 ; RV64ZVE32F-NEXT: sd t1, 0(a0) ; RV64ZVE32F-NEXT: .LBB48_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: andi a0, a4, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; 
RV64ZVE32F-NEXT: bnez a0, .LBB48_12 @@ -5197,14 +5197,14 @@ ; RV64ZVE32F-NEXT: andi a0, a4, 32 ; RV64ZVE32F-NEXT: beqz a0, .LBB48_9 ; RV64ZVE32F-NEXT: .LBB48_8: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a0, v8 ; RV64ZVE32F-NEXT: slli a0, a0, 3 ; RV64ZVE32F-NEXT: add a0, a1, a0 ; RV64ZVE32F-NEXT: sd a5, 0(a0) ; RV64ZVE32F-NEXT: .LBB48_9: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: andi a0, a4, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2 ; RV64ZVE32F-NEXT: bnez a0, .LBB48_15 @@ -5221,7 +5221,7 @@ ; RV64ZVE32F-NEXT: andi a0, a4, 8 ; RV64ZVE32F-NEXT: beqz a0, .LBB48_6 ; RV64ZVE32F-NEXT: .LBB48_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a0, v8 ; RV64ZVE32F-NEXT: slli a0, a0, 3 @@ -5230,7 +5230,7 @@ ; RV64ZVE32F-NEXT: andi a0, a4, 16 ; RV64ZVE32F-NEXT: beqz a0, .LBB48_7 ; RV64ZVE32F-NEXT: .LBB48_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a0, v10 ; RV64ZVE32F-NEXT: slli a0, a0, 3 ; RV64ZVE32F-NEXT: add a0, a1, a0 @@ -5246,7 +5246,7 @@ ; RV64ZVE32F-NEXT: andi a0, a4, -128 ; RV64ZVE32F-NEXT: beqz a0, .LBB48_11 ; RV64ZVE32F-NEXT: .LBB48_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a0, v8 ; RV64ZVE32F-NEXT: slli a0, a0, 3 @@ -5261,15 +5261,15 @@ define void @mscatter_baseidx_sext_v8i32_v8i64(<8 x i64> %val, i64* %base, <8 x i32> %idxs, <8 x i1> %m) { ; RV32V-LABEL: mscatter_baseidx_sext_v8i32_v8i64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vsll.vi v12, v12, 3 -; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_v8i32_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf2 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t @@ -5299,10 +5299,10 @@ ; RV32ZVE32F-NEXT: lw t5, 16(a0) ; RV32ZVE32F-NEXT: lw s0, 12(a0) ; RV32ZVE32F-NEXT: lw t6, 8(a0) -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a1, v0 ; RV32ZVE32F-NEXT: andi s1, a1, 1 ; RV32ZVE32F-NEXT: bnez s1, .LBB49_10 @@ -5328,7 +5328,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, -128 ; RV32ZVE32F-NEXT: beqz a0, .LBB49_9 ; RV32ZVE32F-NEXT: .LBB49_8: # %cond.store13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a0, v8 ; RV32ZVE32F-NEXT: sw a3, 0(a0) @@ -5342,14 +5342,14 @@ ; RV32ZVE32F-NEXT: .LBB49_10: # %cond.store ; RV32ZVE32F-NEXT: lw s1, 4(a0) ; 
RV32ZVE32F-NEXT: lw a0, 0(a0) -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s s2, v8 ; RV32ZVE32F-NEXT: sw s1, 4(s2) ; RV32ZVE32F-NEXT: sw a0, 0(s2) ; RV32ZVE32F-NEXT: andi a0, a1, 2 ; RV32ZVE32F-NEXT: beqz a0, .LBB49_2 ; RV32ZVE32F-NEXT: .LBB49_11: # %cond.store1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw s0, 4(a0) @@ -5357,7 +5357,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 4 ; RV32ZVE32F-NEXT: beqz a0, .LBB49_3 ; RV32ZVE32F-NEXT: .LBB49_12: # %cond.store3 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw t5, 0(a0) @@ -5365,7 +5365,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 8 ; RV32ZVE32F-NEXT: beqz a0, .LBB49_4 ; RV32ZVE32F-NEXT: .LBB49_13: # %cond.store5 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw t3, 0(a0) @@ -5373,7 +5373,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 16 ; RV32ZVE32F-NEXT: beqz a0, .LBB49_5 ; RV32ZVE32F-NEXT: .LBB49_14: # %cond.store7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw t1, 0(a0) @@ -5381,7 +5381,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 32 ; RV32ZVE32F-NEXT: beqz a0, .LBB49_6 ; RV32ZVE32F-NEXT: .LBB49_15: # %cond.store9 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw a7, 0(a0) @@ -5389,7 +5389,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 64 ; RV32ZVE32F-NEXT: beqz a0, .LBB49_7 ; RV32ZVE32F-NEXT: .LBB49_16: # %cond.store11 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw a5, 0(a0) @@ -5407,13 +5407,13 @@ ; RV64ZVE32F-NEXT: ld a7, 24(a0) ; RV64ZVE32F-NEXT: ld t0, 16(a0) ; RV64ZVE32F-NEXT: ld t1, 8(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a4, v0 ; RV64ZVE32F-NEXT: andi t2, a4, 1 ; RV64ZVE32F-NEXT: beqz t2, .LBB49_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.store ; RV64ZVE32F-NEXT: ld a0, 0(a0) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s t2, v8 ; RV64ZVE32F-NEXT: slli t2, t2, 3 ; RV64ZVE32F-NEXT: add t2, a1, t2 @@ -5422,16 +5422,16 @@ ; RV64ZVE32F-NEXT: andi a0, a4, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB49_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a0, v10 ; RV64ZVE32F-NEXT: slli a0, a0, 3 ; RV64ZVE32F-NEXT: add a0, a1, a0 ; RV64ZVE32F-NEXT: sd t1, 0(a0) ; RV64ZVE32F-NEXT: .LBB49_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma 
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: andi a0, a4, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a0, .LBB49_12 @@ -5445,14 +5445,14 @@ ; RV64ZVE32F-NEXT: andi a0, a4, 32 ; RV64ZVE32F-NEXT: beqz a0, .LBB49_9 ; RV64ZVE32F-NEXT: .LBB49_8: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a0, v8 ; RV64ZVE32F-NEXT: slli a0, a0, 3 ; RV64ZVE32F-NEXT: add a0, a1, a0 ; RV64ZVE32F-NEXT: sd a5, 0(a0) ; RV64ZVE32F-NEXT: .LBB49_9: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: andi a0, a4, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2 ; RV64ZVE32F-NEXT: bnez a0, .LBB49_15 @@ -5469,7 +5469,7 @@ ; RV64ZVE32F-NEXT: andi a0, a4, 8 ; RV64ZVE32F-NEXT: beqz a0, .LBB49_6 ; RV64ZVE32F-NEXT: .LBB49_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a0, v8 ; RV64ZVE32F-NEXT: slli a0, a0, 3 @@ -5478,7 +5478,7 @@ ; RV64ZVE32F-NEXT: andi a0, a4, 16 ; RV64ZVE32F-NEXT: beqz a0, .LBB49_7 ; RV64ZVE32F-NEXT: .LBB49_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a0, v10 ; RV64ZVE32F-NEXT: slli a0, a0, 3 ; RV64ZVE32F-NEXT: add a0, a1, a0 @@ -5494,7 +5494,7 @@ ; RV64ZVE32F-NEXT: andi a0, a4, -128 ; RV64ZVE32F-NEXT: beqz a0, .LBB49_11 ; RV64ZVE32F-NEXT: .LBB49_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a0, v8 ; RV64ZVE32F-NEXT: slli a0, a0, 3 @@ -5510,15 +5510,15 @@ define void @mscatter_baseidx_zext_v8i32_v8i64(<8 x i64> %val, i64* %base, <8 x i32> %idxs, <8 x i1> %m) { ; RV32V-LABEL: mscatter_baseidx_zext_v8i32_v8i64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vsll.vi v12, v12, 3 -; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_zext_v8i32_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vzext.vf2 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t @@ -5548,10 +5548,10 @@ ; RV32ZVE32F-NEXT: lw t5, 16(a0) ; RV32ZVE32F-NEXT: lw s0, 12(a0) ; RV32ZVE32F-NEXT: lw t6, 8(a0) -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a1, v0 ; RV32ZVE32F-NEXT: andi s1, a1, 1 ; RV32ZVE32F-NEXT: bnez s1, .LBB50_10 @@ -5577,7 +5577,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, -128 ; RV32ZVE32F-NEXT: beqz a0, .LBB50_9 ; RV32ZVE32F-NEXT: .LBB50_8: # %cond.store13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, 
m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a0, v8 ; RV32ZVE32F-NEXT: sw a3, 0(a0) @@ -5591,14 +5591,14 @@ ; RV32ZVE32F-NEXT: .LBB50_10: # %cond.store ; RV32ZVE32F-NEXT: lw s1, 4(a0) ; RV32ZVE32F-NEXT: lw a0, 0(a0) -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s s2, v8 ; RV32ZVE32F-NEXT: sw s1, 4(s2) ; RV32ZVE32F-NEXT: sw a0, 0(s2) ; RV32ZVE32F-NEXT: andi a0, a1, 2 ; RV32ZVE32F-NEXT: beqz a0, .LBB50_2 ; RV32ZVE32F-NEXT: .LBB50_11: # %cond.store1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw s0, 4(a0) @@ -5606,7 +5606,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 4 ; RV32ZVE32F-NEXT: beqz a0, .LBB50_3 ; RV32ZVE32F-NEXT: .LBB50_12: # %cond.store3 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw t5, 0(a0) @@ -5614,7 +5614,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 8 ; RV32ZVE32F-NEXT: beqz a0, .LBB50_4 ; RV32ZVE32F-NEXT: .LBB50_13: # %cond.store5 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw t3, 0(a0) @@ -5622,7 +5622,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 16 ; RV32ZVE32F-NEXT: beqz a0, .LBB50_5 ; RV32ZVE32F-NEXT: .LBB50_14: # %cond.store7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw t1, 0(a0) @@ -5630,7 +5630,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 32 ; RV32ZVE32F-NEXT: beqz a0, .LBB50_6 ; RV32ZVE32F-NEXT: .LBB50_15: # %cond.store9 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw a7, 0(a0) @@ -5638,7 +5638,7 @@ ; RV32ZVE32F-NEXT: andi a0, a1, 64 ; RV32ZVE32F-NEXT: beqz a0, .LBB50_7 ; RV32ZVE32F-NEXT: .LBB50_16: # %cond.store11 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s a0, v10 ; RV32ZVE32F-NEXT: sw a5, 0(a0) @@ -5656,13 +5656,13 @@ ; RV64ZVE32F-NEXT: ld a7, 24(a0) ; RV64ZVE32F-NEXT: ld t0, 16(a0) ; RV64ZVE32F-NEXT: ld t1, 8(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a4, v0 ; RV64ZVE32F-NEXT: andi t2, a4, 1 ; RV64ZVE32F-NEXT: beqz t2, .LBB50_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.store ; RV64ZVE32F-NEXT: ld a0, 0(a0) -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s t2, v8 ; RV64ZVE32F-NEXT: slli t2, t2, 32 ; RV64ZVE32F-NEXT: srli t2, t2, 29 @@ -5672,7 +5672,7 @@ ; RV64ZVE32F-NEXT: andi a0, a4, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB50_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a0, v10 ; RV64ZVE32F-NEXT: slli 
a0, a0, 32 @@ -5680,9 +5680,9 @@ ; RV64ZVE32F-NEXT: add a0, a1, a0 ; RV64ZVE32F-NEXT: sd t1, 0(a0) ; RV64ZVE32F-NEXT: .LBB50_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: andi a0, a4, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a0, .LBB50_12 @@ -5696,7 +5696,7 @@ ; RV64ZVE32F-NEXT: andi a0, a4, 32 ; RV64ZVE32F-NEXT: beqz a0, .LBB50_9 ; RV64ZVE32F-NEXT: .LBB50_8: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a0, v8 ; RV64ZVE32F-NEXT: slli a0, a0, 32 @@ -5704,7 +5704,7 @@ ; RV64ZVE32F-NEXT: add a0, a1, a0 ; RV64ZVE32F-NEXT: sd a5, 0(a0) ; RV64ZVE32F-NEXT: .LBB50_9: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: andi a0, a4, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2 ; RV64ZVE32F-NEXT: bnez a0, .LBB50_15 @@ -5722,7 +5722,7 @@ ; RV64ZVE32F-NEXT: andi a0, a4, 8 ; RV64ZVE32F-NEXT: beqz a0, .LBB50_6 ; RV64ZVE32F-NEXT: .LBB50_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a0, v8 ; RV64ZVE32F-NEXT: slli a0, a0, 32 @@ -5732,7 +5732,7 @@ ; RV64ZVE32F-NEXT: andi a0, a4, 16 ; RV64ZVE32F-NEXT: beqz a0, .LBB50_7 ; RV64ZVE32F-NEXT: .LBB50_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a0, v10 ; RV64ZVE32F-NEXT: slli a0, a0, 32 ; RV64ZVE32F-NEXT: srli a0, a0, 29 @@ -5750,7 +5750,7 @@ ; RV64ZVE32F-NEXT: andi a0, a4, -128 ; RV64ZVE32F-NEXT: beqz a0, .LBB50_11 ; RV64ZVE32F-NEXT: .LBB50_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a0, v8 ; RV64ZVE32F-NEXT: slli a0, a0, 32 @@ -5767,16 +5767,16 @@ define void @mscatter_baseidx_v8i64(<8 x i64> %val, i64* %base, <8 x i64> %idxs, <8 x i1> %m) { ; RV32V-LABEL: mscatter_baseidx_v8i64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vnsrl.wi v16, v12, 0 ; RV32V-NEXT: vsll.vi v12, v16, 3 -; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsll.vi v12, v12, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret @@ -5845,11 +5845,11 @@ ; RV32ZVE32F-NEXT: sw s6, 4(sp) ; RV32ZVE32F-NEXT: sw a0, 0(sp) ; RV32ZVE32F-NEXT: mv a0, sp -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vle32.v v8, (a0) ; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a0, v0 ; RV32ZVE32F-NEXT: andi 
a1, a0, 1 ; RV32ZVE32F-NEXT: bnez a1, .LBB51_10 @@ -5875,7 +5875,7 @@ ; RV32ZVE32F-NEXT: andi a0, a0, -128 ; RV32ZVE32F-NEXT: beqz a0, .LBB51_9 ; RV32ZVE32F-NEXT: .LBB51_8: # %cond.store13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a0, v8 ; RV32ZVE32F-NEXT: sw a4, 0(a0) @@ -5897,14 +5897,14 @@ ; RV32ZVE32F-NEXT: addi sp, sp, 96 ; RV32ZVE32F-NEXT: ret ; RV32ZVE32F-NEXT: .LBB51_10: # %cond.store -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: sw s5, 4(a1) ; RV32ZVE32F-NEXT: sw s4, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 2 ; RV32ZVE32F-NEXT: beqz a1, .LBB51_2 ; RV32ZVE32F-NEXT: .LBB51_11: # %cond.store1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: sw s3, 4(a1) @@ -5912,7 +5912,7 @@ ; RV32ZVE32F-NEXT: andi a1, a0, 4 ; RV32ZVE32F-NEXT: beqz a1, .LBB51_3 ; RV32ZVE32F-NEXT: .LBB51_12: # %cond.store3 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: sw t6, 0(a1) @@ -5920,7 +5920,7 @@ ; RV32ZVE32F-NEXT: andi a1, a0, 8 ; RV32ZVE32F-NEXT: beqz a1, .LBB51_4 ; RV32ZVE32F-NEXT: .LBB51_13: # %cond.store5 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: sw t4, 0(a1) @@ -5928,7 +5928,7 @@ ; RV32ZVE32F-NEXT: andi a1, a0, 16 ; RV32ZVE32F-NEXT: beqz a1, .LBB51_5 ; RV32ZVE32F-NEXT: .LBB51_14: # %cond.store7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: sw t2, 0(a1) @@ -5936,7 +5936,7 @@ ; RV32ZVE32F-NEXT: andi a1, a0, 32 ; RV32ZVE32F-NEXT: beqz a1, .LBB51_6 ; RV32ZVE32F-NEXT: .LBB51_15: # %cond.store9 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: sw t0, 0(a1) @@ -5944,7 +5944,7 @@ ; RV32ZVE32F-NEXT: andi a1, a0, 64 ; RV32ZVE32F-NEXT: beqz a1, .LBB51_7 ; RV32ZVE32F-NEXT: .LBB51_16: # %cond.store11 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: sw a6, 0(a1) @@ -5979,7 +5979,7 @@ ; RV64ZVE32F-NEXT: ld t2, 40(a2) ; RV64ZVE32F-NEXT: ld t0, 48(a2) ; RV64ZVE32F-NEXT: ld a5, 56(a2) -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a7, v0 ; RV64ZVE32F-NEXT: andi s3, a7, 1 ; RV64ZVE32F-NEXT: bnez s3, .LBB51_10 @@ -6070,32 +6070,32 @@ define void @mscatter_v1f16(<1 x half> %val, <1 x half*> %ptrs, <1 x i1> %m) { ; RV32V-LABEL: mscatter_v1f16: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; RV32V-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_v1f16: ; RV64: # 
%bb.0: -; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret ; ; RV32ZVE32F-LABEL: mscatter_v1f16: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV32ZVE32F-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32ZVE32F-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_v1f16: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.v.i v9, 0 ; RV64ZVE32F-NEXT: vmerge.vim v9, v9, 1, v0 ; RV64ZVE32F-NEXT: vmv.x.s a1, v9 ; RV64ZVE32F-NEXT: andi a1, a1, 1 ; RV64ZVE32F-NEXT: beqz a1, .LBB52_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.store -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vse16.v v8, (a0) ; RV64ZVE32F-NEXT: .LBB52_2: # %else ; RV64ZVE32F-NEXT: ret @@ -6108,25 +6108,25 @@ define void @mscatter_v2f16(<2 x half> %val, <2 x half*> %ptrs, <2 x i1> %m) { ; RV32V-LABEL: mscatter_v2f16: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; RV32V-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_v2f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret ; ; RV32ZVE32F-LABEL: mscatter_v2f16: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV32ZVE32F-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32ZVE32F-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_v2f16: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: bnez a3, .LBB53_3 @@ -6136,12 +6136,12 @@ ; RV64ZVE32F-NEXT: .LBB53_2: # %else2 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB53_3: # %cond.store -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vse16.v v8, (a0) ; RV64ZVE32F-NEXT: andi a0, a2, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB53_2 ; RV64ZVE32F-NEXT: .LBB53_4: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vse16.v v8, (a1) ; RV64ZVE32F-NEXT: ret @@ -6154,13 +6154,13 @@ define void @mscatter_v4f16(<4 x half> %val, <4 x half*> %ptrs, <4 x i1> %m) { ; RV32-LABEL: mscatter_v4f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v4f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret ; @@ -6169,7 +6169,7 @@ ; RV64ZVE32F-NEXT: ld a1, 24(a0) ; RV64ZVE32F-NEXT: ld a2, 16(a0) ; RV64ZVE32F-NEXT: ld a4, 8(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v0 ; RV64ZVE32F-NEXT: andi a5, a3, 1 ; RV64ZVE32F-NEXT: bnez a5, .LBB54_5 @@ -6186,24 +6186,24 @@ ; RV64ZVE32F-NEXT: 
ret ; RV64ZVE32F-NEXT: .LBB54_5: # %cond.store ; RV64ZVE32F-NEXT: ld a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vse16.v v8, (a0) ; RV64ZVE32F-NEXT: andi a0, a3, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB54_2 ; RV64ZVE32F-NEXT: .LBB54_6: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vse16.v v9, (a4) ; RV64ZVE32F-NEXT: andi a0, a3, 4 ; RV64ZVE32F-NEXT: beqz a0, .LBB54_3 ; RV64ZVE32F-NEXT: .LBB54_7: # %cond.store3 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: vse16.v v9, (a2) ; RV64ZVE32F-NEXT: andi a0, a3, 8 ; RV64ZVE32F-NEXT: beqz a0, .LBB54_4 ; RV64ZVE32F-NEXT: .LBB54_8: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 3 ; RV64ZVE32F-NEXT: vse16.v v8, (a1) ; RV64ZVE32F-NEXT: ret @@ -6214,13 +6214,13 @@ define void @mscatter_truemask_v4f16(<4 x half> %val, <4 x half*> %ptrs) { ; RV32-LABEL: mscatter_truemask_v4f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_truemask_v4f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10 ; RV64-NEXT: ret ; @@ -6229,7 +6229,7 @@ ; RV64ZVE32F-NEXT: ld a1, 24(a0) ; RV64ZVE32F-NEXT: ld a2, 16(a0) ; RV64ZVE32F-NEXT: ld a4, 8(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vmset.m v9 ; RV64ZVE32F-NEXT: vmv.x.s a3, v9 ; RV64ZVE32F-NEXT: beqz zero, .LBB55_5 @@ -6246,24 +6246,24 @@ ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB55_5: # %cond.store ; RV64ZVE32F-NEXT: ld a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vse16.v v8, (a0) ; RV64ZVE32F-NEXT: andi a0, a3, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB55_2 ; RV64ZVE32F-NEXT: .LBB55_6: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vse16.v v9, (a4) ; RV64ZVE32F-NEXT: andi a0, a3, 4 ; RV64ZVE32F-NEXT: beqz a0, .LBB55_3 ; RV64ZVE32F-NEXT: .LBB55_7: # %cond.store3 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: vse16.v v9, (a2) ; RV64ZVE32F-NEXT: andi a0, a3, 8 ; RV64ZVE32F-NEXT: beqz a0, .LBB55_4 ; RV64ZVE32F-NEXT: .LBB55_8: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 3 ; RV64ZVE32F-NEXT: vse16.v v8, (a1) ; RV64ZVE32F-NEXT: ret @@ -6286,13 +6286,13 @@ define void @mscatter_v8f16(<8 x half> %val, <8 x half*> %ptrs, <8 x i1> %m) { ; RV32-LABEL: mscatter_v8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: 
mscatter_v8f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t ; RV64-NEXT: ret ; @@ -6305,7 +6305,7 @@ ; RV64ZVE32F-NEXT: ld a6, 24(a0) ; RV64ZVE32F-NEXT: ld a7, 16(a0) ; RV64ZVE32F-NEXT: ld t0, 8(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v0 ; RV64ZVE32F-NEXT: andi t1, a3, 1 ; RV64ZVE32F-NEXT: bnez t1, .LBB57_9 @@ -6334,48 +6334,48 @@ ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB57_9: # %cond.store ; RV64ZVE32F-NEXT: ld a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vse16.v v8, (a0) ; RV64ZVE32F-NEXT: andi a0, a3, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB57_2 ; RV64ZVE32F-NEXT: .LBB57_10: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vse16.v v9, (t0) ; RV64ZVE32F-NEXT: andi a0, a3, 4 ; RV64ZVE32F-NEXT: beqz a0, .LBB57_3 ; RV64ZVE32F-NEXT: .LBB57_11: # %cond.store3 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: vse16.v v9, (a7) ; RV64ZVE32F-NEXT: andi a0, a3, 8 ; RV64ZVE32F-NEXT: beqz a0, .LBB57_4 ; RV64ZVE32F-NEXT: .LBB57_12: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 3 ; RV64ZVE32F-NEXT: vse16.v v9, (a6) ; RV64ZVE32F-NEXT: andi a0, a3, 16 ; RV64ZVE32F-NEXT: beqz a0, .LBB57_5 ; RV64ZVE32F-NEXT: .LBB57_13: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4 ; RV64ZVE32F-NEXT: vse16.v v9, (a5) ; RV64ZVE32F-NEXT: andi a0, a3, 32 ; RV64ZVE32F-NEXT: beqz a0, .LBB57_6 ; RV64ZVE32F-NEXT: .LBB57_14: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 5 ; RV64ZVE32F-NEXT: vse16.v v9, (a4) ; RV64ZVE32F-NEXT: andi a0, a3, 64 ; RV64ZVE32F-NEXT: beqz a0, .LBB57_7 ; RV64ZVE32F-NEXT: .LBB57_15: # %cond.store11 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 6 ; RV64ZVE32F-NEXT: vse16.v v9, (a2) ; RV64ZVE32F-NEXT: andi a0, a3, -128 ; RV64ZVE32F-NEXT: beqz a0, .LBB57_8 ; RV64ZVE32F-NEXT: .LBB57_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV64ZVE32F-NEXT: vse16.v v8, (a1) ; RV64ZVE32F-NEXT: ret @@ -6386,25 +6386,25 @@ define void @mscatter_baseidx_v8i8_v8f16(<8 x half> %val, half* %base, <8 x i8> %idxs, <8 x i1> %m) { ; RV32-LABEL: mscatter_baseidx_v8i8_v8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v10, v9 ; RV32-NEXT: vadd.vv v10, v10, v10 -; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_v8i8_v8f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, 
m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v12, v9 ; RV64-NEXT: vadd.vv v12, v12, v12 -; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_baseidx_v8i8_v8f16: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB58_2 @@ -6412,22 +6412,22 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vse16.v v8, (a2) ; RV64ZVE32F-NEXT: .LBB58_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB58_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; RV64ZVE32F-NEXT: .LBB58_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB58_6 @@ -6435,11 +6435,11 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 2 ; RV64ZVE32F-NEXT: vse16.v v11, (a2) ; RV64ZVE32F-NEXT: .LBB58_6: # %else4 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB58_13 @@ -6450,16 +6450,16 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB58_10 ; RV64ZVE32F-NEXT: .LBB58_9: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; RV64ZVE32F-NEXT: .LBB58_10: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB58_15 @@ -6469,22 +6469,22 @@ ; RV64ZVE32F-NEXT: .LBB58_12: # %else14 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB58_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, 
e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB58_8 ; RV64ZVE32F-NEXT: .LBB58_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 32 @@ -6494,18 +6494,18 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB58_12 ; RV64ZVE32F-NEXT: .LBB58_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v9 ; RV64ZVE32F-NEXT: slli a1, a1, 1 ; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV64ZVE32F-NEXT: vse16.v v8, (a0) ; RV64ZVE32F-NEXT: ret @@ -6517,25 +6517,25 @@ define void @mscatter_baseidx_sext_v8i8_v8f16(<8 x half> %val, half* %base, <8 x i8> %idxs, <8 x i1> %m) { ; RV32-LABEL: mscatter_baseidx_sext_v8i8_v8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v10, v9 ; RV32-NEXT: vadd.vv v10, v10, v10 -; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_v8i8_v8f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v12, v9 ; RV64-NEXT: vadd.vv v12, v12, v12 -; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_baseidx_sext_v8i8_v8f16: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB59_2 @@ -6543,22 +6543,22 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vse16.v v8, (a2) ; RV64ZVE32F-NEXT: .LBB59_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB59_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: 
vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; RV64ZVE32F-NEXT: .LBB59_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB59_6 @@ -6566,11 +6566,11 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 2 ; RV64ZVE32F-NEXT: vse16.v v11, (a2) ; RV64ZVE32F-NEXT: .LBB59_6: # %else4 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB59_13 @@ -6581,16 +6581,16 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB59_10 ; RV64ZVE32F-NEXT: .LBB59_9: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; RV64ZVE32F-NEXT: .LBB59_10: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB59_15 @@ -6600,22 +6600,22 @@ ; RV64ZVE32F-NEXT: .LBB59_12: # %else14 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB59_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB59_8 ; RV64ZVE32F-NEXT: .LBB59_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 32 @@ -6625,18 +6625,18 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB59_12 ; RV64ZVE32F-NEXT: .LBB59_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v9 ; 
RV64ZVE32F-NEXT: slli a1, a1, 1 ; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV64ZVE32F-NEXT: vse16.v v8, (a0) ; RV64ZVE32F-NEXT: ret @@ -6649,25 +6649,25 @@ define void @mscatter_baseidx_zext_v8i8_v8f16(<8 x half> %val, half* %base, <8 x i8> %idxs, <8 x i1> %m) { ; RV32-LABEL: mscatter_baseidx_zext_v8i8_v8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vzext.vf4 v10, v9 ; RV32-NEXT: vadd.vv v10, v10, v10 -; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_zext_v8i8_v8f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vzext.vf8 v12, v9 ; RV64-NEXT: vadd.vv v12, v12, v12 -; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_baseidx_zext_v8i8_v8f16: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB60_2 @@ -6676,23 +6676,23 @@ ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vse16.v v8, (a2) ; RV64ZVE32F-NEXT: .LBB60_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB60_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; RV64ZVE32F-NEXT: .LBB60_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB60_6 @@ -6701,11 +6701,11 @@ ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 2 ; RV64ZVE32F-NEXT: vse16.v v11, (a2) ; RV64ZVE32F-NEXT: .LBB60_6: # %else4 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB60_13 @@ -6716,17 +6716,17 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB60_10 ; RV64ZVE32F-NEXT: .LBB60_9: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; 
RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; RV64ZVE32F-NEXT: .LBB60_10: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB60_15 @@ -6736,24 +6736,24 @@ ; RV64ZVE32F-NEXT: .LBB60_12: # %else14 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB60_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB60_8 ; RV64ZVE32F-NEXT: .LBB60_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 32 @@ -6764,19 +6764,19 @@ ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB60_12 ; RV64ZVE32F-NEXT: .LBB60_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v9 ; RV64ZVE32F-NEXT: andi a1, a1, 255 ; RV64ZVE32F-NEXT: slli a1, a1, 1 ; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV64ZVE32F-NEXT: vse16.v v8, (a0) ; RV64ZVE32F-NEXT: ret @@ -6789,49 +6789,49 @@ define void @mscatter_baseidx_v8f16(<8 x half> %val, half* %base, <8 x i16> %idxs, <8 x i1> %m) { ; RV32-LABEL: mscatter_baseidx_v8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf2 v10, v9 ; RV32-NEXT: vadd.vv v10, v10, v10 -; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_v8f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf4 v12, v9 ; RV64-NEXT: vadd.vv v12, v12, v12 -; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t 
; RV64-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_baseidx_v8f16: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB61_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.store -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vse16.v v8, (a2) ; RV64ZVE32F-NEXT: .LBB61_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB61_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; RV64ZVE32F-NEXT: .LBB61_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB61_6 @@ -6839,11 +6839,11 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 2 ; RV64ZVE32F-NEXT: vse16.v v11, (a2) ; RV64ZVE32F-NEXT: .LBB61_6: # %else4 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB61_13 @@ -6854,16 +6854,16 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB61_10 ; RV64ZVE32F-NEXT: .LBB61_9: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; RV64ZVE32F-NEXT: .LBB61_10: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB61_15 @@ -6873,22 +6873,22 @@ ; RV64ZVE32F-NEXT: .LBB61_12: # %else14 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB61_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV64ZVE32F-NEXT: 
vse16.v v10, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB61_8 ; RV64ZVE32F-NEXT: .LBB61_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 32 @@ -6898,18 +6898,18 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 1 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV64ZVE32F-NEXT: vse16.v v10, (a2) ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB61_12 ; RV64ZVE32F-NEXT: .LBB61_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v9 ; RV64ZVE32F-NEXT: slli a1, a1, 1 ; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV64ZVE32F-NEXT: vse16.v v8, (a0) ; RV64ZVE32F-NEXT: ret @@ -6923,32 +6923,32 @@ define void @mscatter_v1f32(<1 x float> %val, <1 x float*> %ptrs, <1 x i1> %m) { ; RV32V-LABEL: mscatter_v1f32: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV32V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_v1f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret ; ; RV32ZVE32F-LABEL: mscatter_v1f32: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32ZVE32F-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_v1f32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.v.i v9, 0 ; RV64ZVE32F-NEXT: vmerge.vim v9, v9, 1, v0 ; RV64ZVE32F-NEXT: vmv.x.s a1, v9 ; RV64ZVE32F-NEXT: andi a1, a1, 1 ; RV64ZVE32F-NEXT: beqz a1, .LBB62_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.store -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vse32.v v8, (a0) ; RV64ZVE32F-NEXT: .LBB62_2: # %else ; RV64ZVE32F-NEXT: ret @@ -6961,25 +6961,25 @@ define void @mscatter_v2f32(<2 x float> %val, <2 x float*> %ptrs, <2 x i1> %m) { ; RV32V-LABEL: mscatter_v2f32: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_v2f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret ; ; RV32ZVE32F-LABEL: mscatter_v2f32: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV32ZVE32F-NEXT: 
vsoxei32.v v8, (zero), v9, v0.t ; RV32ZVE32F-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_v2f32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: bnez a3, .LBB63_3 @@ -6989,12 +6989,12 @@ ; RV64ZVE32F-NEXT: .LBB63_2: # %else2 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB63_3: # %cond.store -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vse32.v v8, (a0) ; RV64ZVE32F-NEXT: andi a0, a2, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB63_2 ; RV64ZVE32F-NEXT: .LBB63_4: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vse32.v v8, (a1) ; RV64ZVE32F-NEXT: ret @@ -7007,13 +7007,13 @@ define void @mscatter_v4f32(<4 x float> %val, <4 x float*> %ptrs, <4 x i1> %m) { ; RV32-LABEL: mscatter_v4f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v4f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret ; @@ -7022,7 +7022,7 @@ ; RV64ZVE32F-NEXT: ld a1, 24(a0) ; RV64ZVE32F-NEXT: ld a2, 16(a0) ; RV64ZVE32F-NEXT: ld a4, 8(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v0 ; RV64ZVE32F-NEXT: andi a5, a3, 1 ; RV64ZVE32F-NEXT: bnez a5, .LBB64_5 @@ -7039,24 +7039,24 @@ ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB64_5: # %cond.store ; RV64ZVE32F-NEXT: ld a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vse32.v v8, (a0) ; RV64ZVE32F-NEXT: andi a0, a3, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB64_2 ; RV64ZVE32F-NEXT: .LBB64_6: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vse32.v v9, (a4) ; RV64ZVE32F-NEXT: andi a0, a3, 4 ; RV64ZVE32F-NEXT: beqz a0, .LBB64_3 ; RV64ZVE32F-NEXT: .LBB64_7: # %cond.store3 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: vse32.v v9, (a2) ; RV64ZVE32F-NEXT: andi a0, a3, 8 ; RV64ZVE32F-NEXT: beqz a0, .LBB64_4 ; RV64ZVE32F-NEXT: .LBB64_8: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 3 ; RV64ZVE32F-NEXT: vse32.v v8, (a1) ; RV64ZVE32F-NEXT: ret @@ -7067,13 +7067,13 @@ define void @mscatter_truemask_v4f32(<4 x float> %val, <4 x float*> %ptrs) { ; RV32-LABEL: mscatter_truemask_v4f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_truemask_v4f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10 ; RV64-NEXT: ret ; @@ -7082,7 +7082,7 @@ ; RV64ZVE32F-NEXT: ld a1, 24(a0) ; 
RV64ZVE32F-NEXT: ld a2, 16(a0) ; RV64ZVE32F-NEXT: ld a4, 8(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vmset.m v9 ; RV64ZVE32F-NEXT: vmv.x.s a3, v9 ; RV64ZVE32F-NEXT: beqz zero, .LBB65_5 @@ -7099,24 +7099,24 @@ ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB65_5: # %cond.store ; RV64ZVE32F-NEXT: ld a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vse32.v v8, (a0) ; RV64ZVE32F-NEXT: andi a0, a3, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB65_2 ; RV64ZVE32F-NEXT: .LBB65_6: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vse32.v v9, (a4) ; RV64ZVE32F-NEXT: andi a0, a3, 4 ; RV64ZVE32F-NEXT: beqz a0, .LBB65_3 ; RV64ZVE32F-NEXT: .LBB65_7: # %cond.store3 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: vse32.v v9, (a2) ; RV64ZVE32F-NEXT: andi a0, a3, 8 ; RV64ZVE32F-NEXT: beqz a0, .LBB65_4 ; RV64ZVE32F-NEXT: .LBB65_8: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 3 ; RV64ZVE32F-NEXT: vse32.v v8, (a1) ; RV64ZVE32F-NEXT: ret @@ -7139,13 +7139,13 @@ define void @mscatter_v8f32(<8 x float> %val, <8 x float*> %ptrs, <8 x i1> %m) { ; RV32-LABEL: mscatter_v8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_v8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t ; RV64-NEXT: ret ; @@ -7158,7 +7158,7 @@ ; RV64ZVE32F-NEXT: ld a6, 24(a0) ; RV64ZVE32F-NEXT: ld a7, 16(a0) ; RV64ZVE32F-NEXT: ld t0, 8(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v0 ; RV64ZVE32F-NEXT: andi t1, a3, 1 ; RV64ZVE32F-NEXT: bnez t1, .LBB67_9 @@ -7187,48 +7187,48 @@ ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB67_9: # %cond.store ; RV64ZVE32F-NEXT: ld a0, 0(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vse32.v v8, (a0) ; RV64ZVE32F-NEXT: andi a0, a3, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB67_2 ; RV64ZVE32F-NEXT: .LBB67_10: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vse32.v v10, (t0) ; RV64ZVE32F-NEXT: andi a0, a3, 4 ; RV64ZVE32F-NEXT: beqz a0, .LBB67_3 ; RV64ZVE32F-NEXT: .LBB67_11: # %cond.store3 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV64ZVE32F-NEXT: vse32.v v10, (a7) ; RV64ZVE32F-NEXT: andi a0, a3, 8 ; RV64ZVE32F-NEXT: beqz a0, .LBB67_4 ; RV64ZVE32F-NEXT: .LBB67_12: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV64ZVE32F-NEXT: vse32.v v10, (a6) ; RV64ZVE32F-NEXT: andi a0, a3, 16 ; 
RV64ZVE32F-NEXT: beqz a0, .LBB67_5 ; RV64ZVE32F-NEXT: .LBB67_13: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV64ZVE32F-NEXT: vse32.v v10, (a5) ; RV64ZVE32F-NEXT: andi a0, a3, 32 ; RV64ZVE32F-NEXT: beqz a0, .LBB67_6 ; RV64ZVE32F-NEXT: .LBB67_14: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV64ZVE32F-NEXT: vse32.v v10, (a4) ; RV64ZVE32F-NEXT: andi a0, a3, 64 ; RV64ZVE32F-NEXT: beqz a0, .LBB67_7 ; RV64ZVE32F-NEXT: .LBB67_15: # %cond.store11 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV64ZVE32F-NEXT: vse32.v v10, (a2) ; RV64ZVE32F-NEXT: andi a0, a3, -128 ; RV64ZVE32F-NEXT: beqz a0, .LBB67_8 ; RV64ZVE32F-NEXT: .LBB67_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV64ZVE32F-NEXT: vse32.v v8, (a1) ; RV64ZVE32F-NEXT: ret @@ -7239,7 +7239,7 @@ define void @mscatter_baseidx_v8i8_v8f32(<8 x float> %val, float* %base, <8 x i8> %idxs, <8 x i1> %m) { ; RV32-LABEL: mscatter_baseidx_v8i8_v8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v12, v10 ; RV32-NEXT: vsll.vi v10, v12, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t @@ -7247,16 +7247,16 @@ ; ; RV64-LABEL: mscatter_baseidx_v8i8_v8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v12, v10 ; RV64-NEXT: vsll.vi v12, v12, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_baseidx_v8i8_v8f32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB68_2 @@ -7264,22 +7264,22 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vse32.v v8, (a2) ; RV64ZVE32F-NEXT: .LBB68_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB68_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: .LBB68_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB68_6 @@ -7287,11 +7287,11 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: .LBB68_6: # %else4 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB68_13 @@ -7302,16 +7302,16 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB68_10 ; RV64ZVE32F-NEXT: .LBB68_9: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 5 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: .LBB68_10: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB68_15 @@ -7321,22 +7321,22 @@ ; RV64ZVE32F-NEXT: .LBB68_12: # %else14 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB68_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v11, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 3 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB68_8 ; RV64ZVE32F-NEXT: .LBB68_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 32 @@ -7346,18 +7346,18 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 6 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB68_12 ; RV64ZVE32F-NEXT: .LBB68_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v10 ; RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV64ZVE32F-NEXT: vse32.v v8, (a0) ; RV64ZVE32F-NEXT: ret @@ -7369,7 +7369,7 @@ define void @mscatter_baseidx_sext_v8i8_v8f32(<8 x float> %val, float* %base, <8 x i8> %idxs, <8 x i1> %m) { ; RV32-LABEL: mscatter_baseidx_sext_v8i8_v8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli 
zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v12, v10 ; RV32-NEXT: vsll.vi v10, v12, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t @@ -7377,16 +7377,16 @@ ; ; RV64-LABEL: mscatter_baseidx_sext_v8i8_v8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v12, v10 ; RV64-NEXT: vsll.vi v12, v12, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_baseidx_sext_v8i8_v8f32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB69_2 @@ -7394,22 +7394,22 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vse32.v v8, (a2) ; RV64ZVE32F-NEXT: .LBB69_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB69_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: .LBB69_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB69_6 @@ -7417,11 +7417,11 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: .LBB69_6: # %else4 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB69_13 @@ -7432,16 +7432,16 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB69_10 ; RV64ZVE32F-NEXT: .LBB69_9: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 5 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: .LBB69_10: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB69_15 @@ -7451,22 +7451,22 @@ ; RV64ZVE32F-NEXT: .LBB69_12: # %else14 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: 
.LBB69_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v11, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 3 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB69_8 ; RV64ZVE32F-NEXT: .LBB69_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 32 @@ -7476,18 +7476,18 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 6 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB69_12 ; RV64ZVE32F-NEXT: .LBB69_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v10 ; RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV64ZVE32F-NEXT: vse32.v v8, (a0) ; RV64ZVE32F-NEXT: ret @@ -7500,7 +7500,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f32(<8 x float> %val, float* %base, <8 x i8> %idxs, <8 x i1> %m) { ; RV32-LABEL: mscatter_baseidx_zext_v8i8_v8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vzext.vf4 v12, v10 ; RV32-NEXT: vsll.vi v10, v12, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t @@ -7508,16 +7508,16 @@ ; ; RV64-LABEL: mscatter_baseidx_zext_v8i8_v8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vzext.vf8 v12, v10 ; RV64-NEXT: vsll.vi v12, v12, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_baseidx_zext_v8i8_v8f32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB70_2 @@ -7526,23 +7526,23 @@ ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vse32.v v8, (a2) ; RV64ZVE32F-NEXT: .LBB70_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB70_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, 
mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: .LBB70_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB70_6 @@ -7551,11 +7551,11 @@ ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: .LBB70_6: # %else4 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB70_13 @@ -7566,17 +7566,17 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB70_10 ; RV64ZVE32F-NEXT: .LBB70_9: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 5 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: .LBB70_10: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB70_15 @@ -7586,24 +7586,24 @@ ; RV64ZVE32F-NEXT: .LBB70_12: # %else14 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB70_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v11, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 3 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB70_8 ; RV64ZVE32F-NEXT: .LBB70_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 32 @@ -7614,19 +7614,19 @@ ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; 
RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 6 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB70_12 ; RV64ZVE32F-NEXT: .LBB70_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v10 ; RV64ZVE32F-NEXT: andi a1, a1, 255 ; RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV64ZVE32F-NEXT: vse32.v v8, (a0) ; RV64ZVE32F-NEXT: ret @@ -7639,7 +7639,7 @@ define void @mscatter_baseidx_v8i16_v8f32(<8 x float> %val, float* %base, <8 x i16> %idxs, <8 x i1> %m) { ; RV32-LABEL: mscatter_baseidx_v8i16_v8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf2 v12, v10 ; RV32-NEXT: vsll.vi v10, v12, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t @@ -7647,40 +7647,40 @@ ; ; RV64-LABEL: mscatter_baseidx_v8i16_v8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf4 v12, v10 ; RV64-NEXT: vsll.vi v12, v12, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_baseidx_v8i16_v8f32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB71_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.store -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vse32.v v8, (a2) ; RV64ZVE32F-NEXT: .LBB71_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB71_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: .LBB71_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB71_6 @@ -7688,11 +7688,11 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: .LBB71_6: # %else4 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB71_13 @@ -7703,16 +7703,16 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB71_10 ; RV64ZVE32F-NEXT: .LBB71_9: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 5 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: .LBB71_10: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB71_15 @@ -7722,22 +7722,22 @@ ; RV64ZVE32F-NEXT: .LBB71_12: # %else14 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB71_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v11, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 3 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB71_8 ; RV64ZVE32F-NEXT: .LBB71_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 32 @@ -7747,18 +7747,18 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 6 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB71_12 ; RV64ZVE32F-NEXT: .LBB71_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v10 ; RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV64ZVE32F-NEXT: vse32.v v8, (a0) ; RV64ZVE32F-NEXT: ret @@ -7770,7 +7770,7 @@ define void @mscatter_baseidx_sext_v8i16_v8f32(<8 x float> %val, float* %base, <8 x i16> %idxs, <8 x i1> %m) { ; RV32-LABEL: mscatter_baseidx_sext_v8i16_v8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf2 v12, v10 ; RV32-NEXT: vsll.vi v10, v12, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t @@ -7778,40 +7778,40 @@ ; ; RV64-LABEL: mscatter_baseidx_sext_v8i16_v8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, 
e64, m4, ta, ma ; RV64-NEXT: vsext.vf4 v12, v10 ; RV64-NEXT: vsll.vi v12, v12, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_baseidx_sext_v8i16_v8f32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB72_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.store -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vse32.v v8, (a2) ; RV64ZVE32F-NEXT: .LBB72_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB72_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: .LBB72_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB72_6 @@ -7819,11 +7819,11 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: .LBB72_6: # %else4 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB72_13 @@ -7834,16 +7834,16 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB72_10 ; RV64ZVE32F-NEXT: .LBB72_9: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 5 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: .LBB72_10: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB72_15 @@ -7853,22 +7853,22 @@ ; RV64ZVE32F-NEXT: .LBB72_12: # %else14 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB72_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v11, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, 
v11 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 3 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB72_8 ; RV64ZVE32F-NEXT: .LBB72_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 32 @@ -7878,18 +7878,18 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 6 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB72_12 ; RV64ZVE32F-NEXT: .LBB72_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v10 ; RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV64ZVE32F-NEXT: vse32.v v8, (a0) ; RV64ZVE32F-NEXT: ret @@ -7902,7 +7902,7 @@ define void @mscatter_baseidx_zext_v8i16_v8f32(<8 x float> %val, float* %base, <8 x i16> %idxs, <8 x i1> %m) { ; RV32-LABEL: mscatter_baseidx_zext_v8i16_v8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vzext.vf2 v12, v10 ; RV32-NEXT: vsll.vi v10, v12, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t @@ -7910,44 +7910,44 @@ ; ; RV64-LABEL: mscatter_baseidx_zext_v8i16_v8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vzext.vf4 v12, v10 ; RV64-NEXT: vsll.vi v12, v12, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_baseidx_zext_v8i16_v8f32: ; RV64ZVE32F: # %bb.0: ; RV64ZVE32F-NEXT: lui a1, 16 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: addiw a1, a1, -1 ; RV64ZVE32F-NEXT: beqz a3, .LBB73_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.store -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v10 ; RV64ZVE32F-NEXT: and a3, a3, a1 ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vse32.v v8, (a3) ; RV64ZVE32F-NEXT: .LBB73_2: # %else ; RV64ZVE32F-NEXT: andi a3, a2, 2 ; RV64ZVE32F-NEXT: beqz a3, .LBB73_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; 
RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v11 ; RV64ZVE32F-NEXT: and a3, a3, a1 ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1 ; RV64ZVE32F-NEXT: vse32.v v12, (a3) ; RV64ZVE32F-NEXT: .LBB73_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a3, a2, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 2 ; RV64ZVE32F-NEXT: beqz a3, .LBB73_6 @@ -7956,11 +7956,11 @@ ; RV64ZVE32F-NEXT: and a3, a3, a1 ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2 ; RV64ZVE32F-NEXT: vse32.v v12, (a3) ; RV64ZVE32F-NEXT: .LBB73_6: # %else4 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; RV64ZVE32F-NEXT: andi a3, a2, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 4 ; RV64ZVE32F-NEXT: bnez a3, .LBB73_13 @@ -7971,17 +7971,17 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 32 ; RV64ZVE32F-NEXT: beqz a3, .LBB73_10 ; RV64ZVE32F-NEXT: .LBB73_9: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v11 ; RV64ZVE32F-NEXT: and a3, a3, a1 ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 5 ; RV64ZVE32F-NEXT: vse32.v v12, (a3) ; RV64ZVE32F-NEXT: .LBB73_10: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a3, a2, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2 ; RV64ZVE32F-NEXT: bnez a3, .LBB73_15 @@ -7991,24 +7991,24 @@ ; RV64ZVE32F-NEXT: .LBB73_12: # %else14 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB73_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v11, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v11 ; RV64ZVE32F-NEXT: and a3, a3, a1 ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 3 ; RV64ZVE32F-NEXT: vse32.v v12, (a3) ; RV64ZVE32F-NEXT: andi a3, a2, 16 ; RV64ZVE32F-NEXT: beqz a3, .LBB73_8 ; RV64ZVE32F-NEXT: .LBB73_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v10 ; RV64ZVE32F-NEXT: and a3, a3, a1 ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4 ; RV64ZVE32F-NEXT: vse32.v v12, (a3) ; RV64ZVE32F-NEXT: andi a3, a2, 32 @@ -8019,19 +8019,19 @@ ; RV64ZVE32F-NEXT: and a3, a3, a1 ; RV64ZVE32F-NEXT: slli a3, a3, 2 ; RV64ZVE32F-NEXT: add a3, a0, a3 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; 
RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 6 ; RV64ZVE32F-NEXT: vse32.v v12, (a3) ; RV64ZVE32F-NEXT: andi a2, a2, -128 ; RV64ZVE32F-NEXT: beqz a2, .LBB73_12 ; RV64ZVE32F-NEXT: .LBB73_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: and a1, a2, a1 ; RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV64ZVE32F-NEXT: vse32.v v8, (a0) ; RV64ZVE32F-NEXT: ret @@ -8044,49 +8044,49 @@ define void @mscatter_baseidx_v8f32(<8 x float> %val, float* %base, <8 x i32> %idxs, <8 x i1> %m) { ; RV32-LABEL: mscatter_baseidx_v8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsll.vi v10, v10, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_v8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf2 v12, v10 ; RV64-NEXT: vsll.vi v12, v12, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_baseidx_v8f32: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB74_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.store -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vse32.v v8, (a2) ; RV64ZVE32F-NEXT: .LBB74_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB74_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v12 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: .LBB74_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v10, 4 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB74_12 @@ -8100,16 +8100,16 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB74_9 ; RV64ZVE32F-NEXT: .LBB74_8: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v12, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, 
a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV64ZVE32F-NEXT: vse32.v v10, (a2) ; RV64ZVE32F-NEXT: .LBB74_9: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v12, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB74_15 @@ -8122,28 +8122,28 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 2 ; RV64ZVE32F-NEXT: vse32.v v14, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: beqz a2, .LBB74_6 ; RV64ZVE32F-NEXT: .LBB74_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV64ZVE32F-NEXT: vse32.v v10, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB74_7 ; RV64ZVE32F-NEXT: .LBB74_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v12 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV64ZVE32F-NEXT: vse32.v v10, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 32 @@ -8153,18 +8153,18 @@ ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 2 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 6 ; RV64ZVE32F-NEXT: vse32.v v12, (a2) ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB74_11 ; RV64ZVE32F-NEXT: .LBB74_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v10 ; RV64ZVE32F-NEXT: slli a1, a1, 2 ; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV64ZVE32F-NEXT: vse32.v v8, (a0) ; RV64ZVE32F-NEXT: ret @@ -8178,26 +8178,26 @@ define void @mscatter_v1f64(<1 x double> %val, <1 x double*> %ptrs, <1 x i1> %m) { ; RV32V-LABEL: mscatter_v1f64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32V-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_v1f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret ; ; RV32ZVE32F-LABEL: mscatter_v1f64: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.v.i v9, 0 ; 
RV32ZVE32F-NEXT: vmerge.vim v9, v9, 1, v0 ; RV32ZVE32F-NEXT: vmv.x.s a0, v9 ; RV32ZVE32F-NEXT: andi a0, a0, 1 ; RV32ZVE32F-NEXT: beqz a0, .LBB75_2 ; RV32ZVE32F-NEXT: # %bb.1: # %cond.store -; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a0, v8 ; RV32ZVE32F-NEXT: fsd fa0, 0(a0) ; RV32ZVE32F-NEXT: .LBB75_2: # %else @@ -8205,7 +8205,7 @@ ; ; RV64ZVE32F-LABEL: mscatter_v1f64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.v.i v8, 0 ; RV64ZVE32F-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64ZVE32F-NEXT: vmv.x.s a1, v8 @@ -8224,19 +8224,19 @@ define void @mscatter_v2f64(<2 x double> %val, <2 x double*> %ptrs, <2 x i1> %m) { ; RV32V-LABEL: mscatter_v2f64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32V-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_v2f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret ; ; RV32ZVE32F-LABEL: mscatter_v2f64: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a0, v0 ; RV32ZVE32F-NEXT: andi a1, a0, 1 ; RV32ZVE32F-NEXT: bnez a1, .LBB76_3 @@ -8246,13 +8246,13 @@ ; RV32ZVE32F-NEXT: .LBB76_2: # %else2 ; RV32ZVE32F-NEXT: ret ; RV32ZVE32F-NEXT: .LBB76_3: # %cond.store -; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: fsd fa0, 0(a1) ; RV32ZVE32F-NEXT: andi a0, a0, 2 ; RV32ZVE32F-NEXT: beqz a0, .LBB76_2 ; RV32ZVE32F-NEXT: .LBB76_4: # %cond.store1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a0, v8 ; RV32ZVE32F-NEXT: fsd fa1, 0(a0) @@ -8260,7 +8260,7 @@ ; ; RV64ZVE32F-LABEL: mscatter_v2f64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: bnez a3, .LBB76_3 @@ -8285,19 +8285,19 @@ define void @mscatter_v4f64(<4 x double> %val, <4 x double*> %ptrs, <4 x i1> %m) { ; RV32V-LABEL: mscatter_v4f64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_v4f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret ; ; RV32ZVE32F-LABEL: mscatter_v4f64: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a0, v0 ; RV32ZVE32F-NEXT: andi a1, a0, 1 ; RV32ZVE32F-NEXT: bnez a1, .LBB77_5 @@ -8313,27 +8313,27 @@ ; RV32ZVE32F-NEXT: .LBB77_4: # %else6 ; RV32ZVE32F-NEXT: ret ; RV32ZVE32F-NEXT: .LBB77_5: # %cond.store -; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: fsd fa0, 0(a1) 
; RV32ZVE32F-NEXT: andi a1, a0, 2 ; RV32ZVE32F-NEXT: beqz a1, .LBB77_2 ; RV32ZVE32F-NEXT: .LBB77_6: # %cond.store1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a1, v9 ; RV32ZVE32F-NEXT: fsd fa1, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 4 ; RV32ZVE32F-NEXT: beqz a1, .LBB77_3 ; RV32ZVE32F-NEXT: .LBB77_7: # %cond.store3 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a1, v9 ; RV32ZVE32F-NEXT: fsd fa2, 0(a1) ; RV32ZVE32F-NEXT: andi a0, a0, 8 ; RV32ZVE32F-NEXT: beqz a0, .LBB77_4 ; RV32ZVE32F-NEXT: .LBB77_8: # %cond.store5 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a0, v8 ; RV32ZVE32F-NEXT: fsd fa3, 0(a0) @@ -8344,7 +8344,7 @@ ; RV64ZVE32F-NEXT: ld a1, 24(a0) ; RV64ZVE32F-NEXT: ld a2, 16(a0) ; RV64ZVE32F-NEXT: ld a4, 8(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v0 ; RV64ZVE32F-NEXT: andi a5, a3, 1 ; RV64ZVE32F-NEXT: bnez a5, .LBB77_5 @@ -8382,19 +8382,19 @@ define void @mscatter_truemask_v4f64(<4 x double> %val, <4 x double*> %ptrs) { ; RV32V-LABEL: mscatter_truemask_v4f64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (zero), v10 ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_truemask_v4f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10 ; RV64-NEXT: ret ; ; RV32ZVE32F-LABEL: mscatter_truemask_v4f64: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV32ZVE32F-NEXT: vmset.m v9 ; RV32ZVE32F-NEXT: vmv.x.s a0, v9 ; RV32ZVE32F-NEXT: beqz zero, .LBB78_5 @@ -8410,27 +8410,27 @@ ; RV32ZVE32F-NEXT: .LBB78_4: # %else6 ; RV32ZVE32F-NEXT: ret ; RV32ZVE32F-NEXT: .LBB78_5: # %cond.store -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: fsd fa0, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 2 ; RV32ZVE32F-NEXT: beqz a1, .LBB78_2 ; RV32ZVE32F-NEXT: .LBB78_6: # %cond.store1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a1, v9 ; RV32ZVE32F-NEXT: fsd fa1, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 4 ; RV32ZVE32F-NEXT: beqz a1, .LBB78_3 ; RV32ZVE32F-NEXT: .LBB78_7: # %cond.store3 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a1, v9 ; RV32ZVE32F-NEXT: fsd fa2, 0(a1) ; RV32ZVE32F-NEXT: andi a0, a0, 8 ; RV32ZVE32F-NEXT: beqz a0, .LBB78_4 ; RV32ZVE32F-NEXT: .LBB78_8: # %cond.store5 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a0, v8 ; RV32ZVE32F-NEXT: fsd fa3, 0(a0) @@ -8441,7 +8441,7 @@ ; RV64ZVE32F-NEXT: ld a1, 24(a0) ; RV64ZVE32F-NEXT: ld a2, 16(a0) ; 
RV64ZVE32F-NEXT: ld a4, 8(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: vmset.m v8 ; RV64ZVE32F-NEXT: vmv.x.s a3, v8 ; RV64ZVE32F-NEXT: beqz zero, .LBB78_5 @@ -8491,19 +8491,19 @@ define void @mscatter_v8f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m) { ; RV32V-LABEL: mscatter_v8f64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (zero), v12, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_v8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t ; RV64-NEXT: ret ; ; RV32ZVE32F-LABEL: mscatter_v8f64: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a0, v0 ; RV32ZVE32F-NEXT: andi a1, a0, 1 ; RV32ZVE32F-NEXT: bnez a1, .LBB80_9 @@ -8531,55 +8531,55 @@ ; RV32ZVE32F-NEXT: .LBB80_8: # %else14 ; RV32ZVE32F-NEXT: ret ; RV32ZVE32F-NEXT: .LBB80_9: # %cond.store -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: fsd fa0, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 2 ; RV32ZVE32F-NEXT: beqz a1, .LBB80_2 ; RV32ZVE32F-NEXT: .LBB80_10: # %cond.store1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa1, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 4 ; RV32ZVE32F-NEXT: beqz a1, .LBB80_3 ; RV32ZVE32F-NEXT: .LBB80_11: # %cond.store3 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa2, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 8 ; RV32ZVE32F-NEXT: beqz a1, .LBB80_4 ; RV32ZVE32F-NEXT: .LBB80_12: # %cond.store5 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa3, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 16 ; RV32ZVE32F-NEXT: beqz a1, .LBB80_5 ; RV32ZVE32F-NEXT: .LBB80_13: # %cond.store7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa4, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 32 ; RV32ZVE32F-NEXT: beqz a1, .LBB80_6 ; RV32ZVE32F-NEXT: .LBB80_14: # %cond.store9 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa5, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 64 ; RV32ZVE32F-NEXT: beqz a1, .LBB80_7 ; RV32ZVE32F-NEXT: .LBB80_15: # %cond.store11 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa6, 0(a1) ; RV32ZVE32F-NEXT: andi a0, a0, -128 ; RV32ZVE32F-NEXT: beqz a0, .LBB80_8 ; RV32ZVE32F-NEXT: .LBB80_16: # %cond.store13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; 
RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a0, v8 ; RV32ZVE32F-NEXT: fsd fa7, 0(a0) @@ -8594,7 +8594,7 @@ ; RV64ZVE32F-NEXT: ld a6, 24(a0) ; RV64ZVE32F-NEXT: ld a7, 16(a0) ; RV64ZVE32F-NEXT: ld t0, 8(a0) -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v0 ; RV64ZVE32F-NEXT: andi t1, a3, 1 ; RV64ZVE32F-NEXT: bnez t1, .LBB80_9 @@ -8660,16 +8660,16 @@ define void @mscatter_baseidx_v8i8_v8f64(<8 x double> %val, double* %base, <8 x i8> %idxs, <8 x i1> %m) { ; RV32V-LABEL: mscatter_baseidx_v8i8_v8f64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vsext.vf4 v14, v12 ; RV32V-NEXT: vsll.vi v12, v14, 3 -; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_v8i8_v8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t @@ -8677,11 +8677,11 @@ ; ; RV32ZVE32F-LABEL: mscatter_baseidx_v8i8_v8f64: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vsext.vf4 v10, v8 ; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a0, v0 ; RV32ZVE32F-NEXT: andi a1, a0, 1 ; RV32ZVE32F-NEXT: bnez a1, .LBB81_9 @@ -8709,55 +8709,55 @@ ; RV32ZVE32F-NEXT: .LBB81_8: # %else14 ; RV32ZVE32F-NEXT: ret ; RV32ZVE32F-NEXT: .LBB81_9: # %cond.store -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: fsd fa0, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 2 ; RV32ZVE32F-NEXT: beqz a1, .LBB81_2 ; RV32ZVE32F-NEXT: .LBB81_10: # %cond.store1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa1, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 4 ; RV32ZVE32F-NEXT: beqz a1, .LBB81_3 ; RV32ZVE32F-NEXT: .LBB81_11: # %cond.store3 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa2, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 8 ; RV32ZVE32F-NEXT: beqz a1, .LBB81_4 ; RV32ZVE32F-NEXT: .LBB81_12: # %cond.store5 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa3, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 16 ; RV32ZVE32F-NEXT: beqz a1, .LBB81_5 ; RV32ZVE32F-NEXT: .LBB81_13: # %cond.store7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa4, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 32 ; RV32ZVE32F-NEXT: beqz a1, .LBB81_6 ; 
RV32ZVE32F-NEXT: .LBB81_14: # %cond.store9 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa5, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 64 ; RV32ZVE32F-NEXT: beqz a1, .LBB81_7 ; RV32ZVE32F-NEXT: .LBB81_15: # %cond.store11 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa6, 0(a1) ; RV32ZVE32F-NEXT: andi a0, a0, -128 ; RV32ZVE32F-NEXT: beqz a0, .LBB81_8 ; RV32ZVE32F-NEXT: .LBB81_16: # %cond.store13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a0, v8 ; RV32ZVE32F-NEXT: fsd fa7, 0(a0) @@ -8765,7 +8765,7 @@ ; ; RV64ZVE32F-LABEL: mscatter_baseidx_v8i8_v8f64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB81_2 @@ -8778,14 +8778,14 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB81_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 3 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: fsd fa1, 0(a2) ; RV64ZVE32F-NEXT: .LBB81_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB81_6 @@ -8795,7 +8795,7 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: fsd fa2, 0(a2) ; RV64ZVE32F-NEXT: .LBB81_6: # %else4 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB81_13 @@ -8806,14 +8806,14 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB81_10 ; RV64ZVE32F-NEXT: .LBB81_9: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 3 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: fsd fa5, 0(a2) ; RV64ZVE32F-NEXT: .LBB81_10: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB81_15 @@ -8823,7 +8823,7 @@ ; RV64ZVE32F-NEXT: .LBB81_12: # %else14 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB81_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 3 @@ -8832,7 +8832,7 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB81_8 ; RV64ZVE32F-NEXT: .LBB81_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli 
zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 3 ; RV64ZVE32F-NEXT: add a2, a0, a2 @@ -8848,7 +8848,7 @@ ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB81_12 ; RV64ZVE32F-NEXT: .LBB81_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v8 ; RV64ZVE32F-NEXT: slli a1, a1, 3 @@ -8863,16 +8863,16 @@ define void @mscatter_baseidx_sext_v8i8_v8f64(<8 x double> %val, double* %base, <8 x i8> %idxs, <8 x i1> %m) { ; RV32V-LABEL: mscatter_baseidx_sext_v8i8_v8f64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vsext.vf4 v14, v12 ; RV32V-NEXT: vsll.vi v12, v14, 3 -; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_v8i8_v8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t @@ -8880,11 +8880,11 @@ ; ; RV32ZVE32F-LABEL: mscatter_baseidx_sext_v8i8_v8f64: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vsext.vf4 v10, v8 ; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a0, v0 ; RV32ZVE32F-NEXT: andi a1, a0, 1 ; RV32ZVE32F-NEXT: bnez a1, .LBB82_9 @@ -8912,55 +8912,55 @@ ; RV32ZVE32F-NEXT: .LBB82_8: # %else14 ; RV32ZVE32F-NEXT: ret ; RV32ZVE32F-NEXT: .LBB82_9: # %cond.store -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: fsd fa0, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 2 ; RV32ZVE32F-NEXT: beqz a1, .LBB82_2 ; RV32ZVE32F-NEXT: .LBB82_10: # %cond.store1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa1, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 4 ; RV32ZVE32F-NEXT: beqz a1, .LBB82_3 ; RV32ZVE32F-NEXT: .LBB82_11: # %cond.store3 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa2, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 8 ; RV32ZVE32F-NEXT: beqz a1, .LBB82_4 ; RV32ZVE32F-NEXT: .LBB82_12: # %cond.store5 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa3, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 16 ; RV32ZVE32F-NEXT: beqz a1, .LBB82_5 ; RV32ZVE32F-NEXT: .LBB82_13: # %cond.store7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa4, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 32 ; RV32ZVE32F-NEXT: beqz a1, 
.LBB82_6 ; RV32ZVE32F-NEXT: .LBB82_14: # %cond.store9 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa5, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 64 ; RV32ZVE32F-NEXT: beqz a1, .LBB82_7 ; RV32ZVE32F-NEXT: .LBB82_15: # %cond.store11 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa6, 0(a1) ; RV32ZVE32F-NEXT: andi a0, a0, -128 ; RV32ZVE32F-NEXT: beqz a0, .LBB82_8 ; RV32ZVE32F-NEXT: .LBB82_16: # %cond.store13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a0, v8 ; RV32ZVE32F-NEXT: fsd fa7, 0(a0) @@ -8968,7 +8968,7 @@ ; ; RV64ZVE32F-LABEL: mscatter_baseidx_sext_v8i8_v8f64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB82_2 @@ -8981,14 +8981,14 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB82_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 3 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: fsd fa1, 0(a2) ; RV64ZVE32F-NEXT: .LBB82_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB82_6 @@ -8998,7 +8998,7 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: fsd fa2, 0(a2) ; RV64ZVE32F-NEXT: .LBB82_6: # %else4 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB82_13 @@ -9009,14 +9009,14 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB82_10 ; RV64ZVE32F-NEXT: .LBB82_9: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 3 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: fsd fa5, 0(a2) ; RV64ZVE32F-NEXT: .LBB82_10: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB82_15 @@ -9026,7 +9026,7 @@ ; RV64ZVE32F-NEXT: .LBB82_12: # %else14 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB82_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 3 @@ -9035,7 +9035,7 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB82_8 ; RV64ZVE32F-NEXT: .LBB82_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; 
RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 3 ; RV64ZVE32F-NEXT: add a2, a0, a2 @@ -9051,7 +9051,7 @@ ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB82_12 ; RV64ZVE32F-NEXT: .LBB82_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v8 ; RV64ZVE32F-NEXT: slli a1, a1, 3 @@ -9067,16 +9067,16 @@ define void @mscatter_baseidx_zext_v8i8_v8f64(<8 x double> %val, double* %base, <8 x i8> %idxs, <8 x i1> %m) { ; RV32V-LABEL: mscatter_baseidx_zext_v8i8_v8f64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vzext.vf4 v14, v12 ; RV32V-NEXT: vsll.vi v12, v14, 3 -; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_zext_v8i8_v8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vzext.vf8 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t @@ -9084,11 +9084,11 @@ ; ; RV32ZVE32F-LABEL: mscatter_baseidx_zext_v8i8_v8f64: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vzext.vf4 v10, v8 ; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a0, v0 ; RV32ZVE32F-NEXT: andi a1, a0, 1 ; RV32ZVE32F-NEXT: bnez a1, .LBB83_9 @@ -9116,55 +9116,55 @@ ; RV32ZVE32F-NEXT: .LBB83_8: # %else14 ; RV32ZVE32F-NEXT: ret ; RV32ZVE32F-NEXT: .LBB83_9: # %cond.store -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: fsd fa0, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 2 ; RV32ZVE32F-NEXT: beqz a1, .LBB83_2 ; RV32ZVE32F-NEXT: .LBB83_10: # %cond.store1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa1, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 4 ; RV32ZVE32F-NEXT: beqz a1, .LBB83_3 ; RV32ZVE32F-NEXT: .LBB83_11: # %cond.store3 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa2, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 8 ; RV32ZVE32F-NEXT: beqz a1, .LBB83_4 ; RV32ZVE32F-NEXT: .LBB83_12: # %cond.store5 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa3, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 16 ; RV32ZVE32F-NEXT: beqz a1, .LBB83_5 ; RV32ZVE32F-NEXT: .LBB83_13: # %cond.store7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa4, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 32 ; 
RV32ZVE32F-NEXT: beqz a1, .LBB83_6 ; RV32ZVE32F-NEXT: .LBB83_14: # %cond.store9 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa5, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 64 ; RV32ZVE32F-NEXT: beqz a1, .LBB83_7 ; RV32ZVE32F-NEXT: .LBB83_15: # %cond.store11 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa6, 0(a1) ; RV32ZVE32F-NEXT: andi a0, a0, -128 ; RV32ZVE32F-NEXT: beqz a0, .LBB83_8 ; RV32ZVE32F-NEXT: .LBB83_16: # %cond.store13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a0, v8 ; RV32ZVE32F-NEXT: fsd fa7, 0(a0) @@ -9172,7 +9172,7 @@ ; ; RV64ZVE32F-LABEL: mscatter_baseidx_zext_v8i8_v8f64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB83_2 @@ -9186,7 +9186,7 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB83_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: andi a2, a2, 255 @@ -9194,7 +9194,7 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: fsd fa1, 0(a2) ; RV64ZVE32F-NEXT: .LBB83_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB83_6 @@ -9205,7 +9205,7 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: fsd fa2, 0(a2) ; RV64ZVE32F-NEXT: .LBB83_6: # %else4 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB83_13 @@ -9216,7 +9216,7 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB83_10 ; RV64ZVE32F-NEXT: .LBB83_9: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: andi a2, a2, 255 @@ -9224,7 +9224,7 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: fsd fa5, 0(a2) ; RV64ZVE32F-NEXT: .LBB83_10: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB83_15 @@ -9234,7 +9234,7 @@ ; RV64ZVE32F-NEXT: .LBB83_12: # %else14 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB83_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: andi a2, a2, 255 @@ -9244,7 +9244,7 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB83_8 ; RV64ZVE32F-NEXT: .LBB83_14: # %cond.store7 
-; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: andi a2, a2, 255 ; RV64ZVE32F-NEXT: slli a2, a2, 3 @@ -9262,7 +9262,7 @@ ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB83_12 ; RV64ZVE32F-NEXT: .LBB83_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v8 ; RV64ZVE32F-NEXT: andi a1, a1, 255 @@ -9279,16 +9279,16 @@ define void @mscatter_baseidx_v8i16_v8f64(<8 x double> %val, double* %base, <8 x i16> %idxs, <8 x i1> %m) { ; RV32V-LABEL: mscatter_baseidx_v8i16_v8f64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vsext.vf2 v14, v12 ; RV32V-NEXT: vsll.vi v12, v14, 3 -; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_v8i16_v8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf4 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t @@ -9296,11 +9296,11 @@ ; ; RV32ZVE32F-LABEL: mscatter_baseidx_v8i16_v8f64: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vsext.vf2 v10, v8 ; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a0, v0 ; RV32ZVE32F-NEXT: andi a1, a0, 1 ; RV32ZVE32F-NEXT: bnez a1, .LBB84_9 @@ -9328,55 +9328,55 @@ ; RV32ZVE32F-NEXT: .LBB84_8: # %else14 ; RV32ZVE32F-NEXT: ret ; RV32ZVE32F-NEXT: .LBB84_9: # %cond.store -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: fsd fa0, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 2 ; RV32ZVE32F-NEXT: beqz a1, .LBB84_2 ; RV32ZVE32F-NEXT: .LBB84_10: # %cond.store1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa1, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 4 ; RV32ZVE32F-NEXT: beqz a1, .LBB84_3 ; RV32ZVE32F-NEXT: .LBB84_11: # %cond.store3 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa2, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 8 ; RV32ZVE32F-NEXT: beqz a1, .LBB84_4 ; RV32ZVE32F-NEXT: .LBB84_12: # %cond.store5 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa3, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 16 ; RV32ZVE32F-NEXT: beqz a1, .LBB84_5 ; RV32ZVE32F-NEXT: .LBB84_13: # %cond.store7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa4, 
0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 32 ; RV32ZVE32F-NEXT: beqz a1, .LBB84_6 ; RV32ZVE32F-NEXT: .LBB84_14: # %cond.store9 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa5, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 64 ; RV32ZVE32F-NEXT: beqz a1, .LBB84_7 ; RV32ZVE32F-NEXT: .LBB84_15: # %cond.store11 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa6, 0(a1) ; RV32ZVE32F-NEXT: andi a0, a0, -128 ; RV32ZVE32F-NEXT: beqz a0, .LBB84_8 ; RV32ZVE32F-NEXT: .LBB84_16: # %cond.store13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a0, v8 ; RV32ZVE32F-NEXT: fsd fa7, 0(a0) @@ -9384,12 +9384,12 @@ ; ; RV64ZVE32F-LABEL: mscatter_baseidx_v8i16_v8f64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB84_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.store -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 3 ; RV64ZVE32F-NEXT: add a2, a0, a2 @@ -9398,14 +9398,14 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB84_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 3 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: fsd fa1, 0(a2) ; RV64ZVE32F-NEXT: .LBB84_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB84_6 @@ -9415,7 +9415,7 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: fsd fa2, 0(a2) ; RV64ZVE32F-NEXT: .LBB84_6: # %else4 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB84_13 @@ -9426,14 +9426,14 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB84_10 ; RV64ZVE32F-NEXT: .LBB84_9: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 3 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: fsd fa5, 0(a2) ; RV64ZVE32F-NEXT: .LBB84_10: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB84_15 @@ -9443,7 +9443,7 @@ ; RV64ZVE32F-NEXT: .LBB84_12: # %else14 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB84_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, 
e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 3 @@ -9452,7 +9452,7 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB84_8 ; RV64ZVE32F-NEXT: .LBB84_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 3 ; RV64ZVE32F-NEXT: add a2, a0, a2 @@ -9468,7 +9468,7 @@ ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB84_12 ; RV64ZVE32F-NEXT: .LBB84_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v8 ; RV64ZVE32F-NEXT: slli a1, a1, 3 @@ -9483,16 +9483,16 @@ define void @mscatter_baseidx_sext_v8i16_v8f64(<8 x double> %val, double* %base, <8 x i16> %idxs, <8 x i1> %m) { ; RV32V-LABEL: mscatter_baseidx_sext_v8i16_v8f64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vsext.vf2 v14, v12 ; RV32V-NEXT: vsll.vi v12, v14, 3 -; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_v8i16_v8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf4 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t @@ -9500,11 +9500,11 @@ ; ; RV32ZVE32F-LABEL: mscatter_baseidx_sext_v8i16_v8f64: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vsext.vf2 v10, v8 ; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a0, v0 ; RV32ZVE32F-NEXT: andi a1, a0, 1 ; RV32ZVE32F-NEXT: bnez a1, .LBB85_9 @@ -9532,55 +9532,55 @@ ; RV32ZVE32F-NEXT: .LBB85_8: # %else14 ; RV32ZVE32F-NEXT: ret ; RV32ZVE32F-NEXT: .LBB85_9: # %cond.store -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: fsd fa0, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 2 ; RV32ZVE32F-NEXT: beqz a1, .LBB85_2 ; RV32ZVE32F-NEXT: .LBB85_10: # %cond.store1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa1, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 4 ; RV32ZVE32F-NEXT: beqz a1, .LBB85_3 ; RV32ZVE32F-NEXT: .LBB85_11: # %cond.store3 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa2, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 8 ; RV32ZVE32F-NEXT: beqz a1, .LBB85_4 ; RV32ZVE32F-NEXT: .LBB85_12: # %cond.store5 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa3, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 16 ; 
RV32ZVE32F-NEXT: beqz a1, .LBB85_5 ; RV32ZVE32F-NEXT: .LBB85_13: # %cond.store7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa4, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 32 ; RV32ZVE32F-NEXT: beqz a1, .LBB85_6 ; RV32ZVE32F-NEXT: .LBB85_14: # %cond.store9 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa5, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 64 ; RV32ZVE32F-NEXT: beqz a1, .LBB85_7 ; RV32ZVE32F-NEXT: .LBB85_15: # %cond.store11 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa6, 0(a1) ; RV32ZVE32F-NEXT: andi a0, a0, -128 ; RV32ZVE32F-NEXT: beqz a0, .LBB85_8 ; RV32ZVE32F-NEXT: .LBB85_16: # %cond.store13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a0, v8 ; RV32ZVE32F-NEXT: fsd fa7, 0(a0) @@ -9588,12 +9588,12 @@ ; ; RV64ZVE32F-LABEL: mscatter_baseidx_sext_v8i16_v8f64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB85_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.store -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 3 ; RV64ZVE32F-NEXT: add a2, a0, a2 @@ -9602,14 +9602,14 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB85_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 3 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: fsd fa1, 0(a2) ; RV64ZVE32F-NEXT: .LBB85_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB85_6 @@ -9619,7 +9619,7 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: fsd fa2, 0(a2) ; RV64ZVE32F-NEXT: .LBB85_6: # %else4 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB85_13 @@ -9630,14 +9630,14 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB85_10 ; RV64ZVE32F-NEXT: .LBB85_9: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 3 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: fsd fa5, 0(a2) ; RV64ZVE32F-NEXT: .LBB85_10: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; 
RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB85_15 @@ -9647,7 +9647,7 @@ ; RV64ZVE32F-NEXT: .LBB85_12: # %else14 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB85_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: slli a2, a2, 3 @@ -9656,7 +9656,7 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB85_8 ; RV64ZVE32F-NEXT: .LBB85_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 3 ; RV64ZVE32F-NEXT: add a2, a0, a2 @@ -9672,7 +9672,7 @@ ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB85_12 ; RV64ZVE32F-NEXT: .LBB85_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v8 ; RV64ZVE32F-NEXT: slli a1, a1, 3 @@ -9688,16 +9688,16 @@ define void @mscatter_baseidx_zext_v8i16_v8f64(<8 x double> %val, double* %base, <8 x i16> %idxs, <8 x i1> %m) { ; RV32V-LABEL: mscatter_baseidx_zext_v8i16_v8f64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vzext.vf2 v14, v12 ; RV32V-NEXT: vsll.vi v12, v14, 3 -; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_zext_v8i16_v8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vzext.vf4 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t @@ -9705,11 +9705,11 @@ ; ; RV32ZVE32F-LABEL: mscatter_baseidx_zext_v8i16_v8f64: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vzext.vf2 v10, v8 ; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a0, v0 ; RV32ZVE32F-NEXT: andi a1, a0, 1 ; RV32ZVE32F-NEXT: bnez a1, .LBB86_9 @@ -9737,55 +9737,55 @@ ; RV32ZVE32F-NEXT: .LBB86_8: # %else14 ; RV32ZVE32F-NEXT: ret ; RV32ZVE32F-NEXT: .LBB86_9: # %cond.store -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: fsd fa0, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 2 ; RV32ZVE32F-NEXT: beqz a1, .LBB86_2 ; RV32ZVE32F-NEXT: .LBB86_10: # %cond.store1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa1, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 4 ; RV32ZVE32F-NEXT: beqz a1, .LBB86_3 ; RV32ZVE32F-NEXT: .LBB86_11: # %cond.store3 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa2, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 8 ; RV32ZVE32F-NEXT: beqz a1, .LBB86_4 ; 
RV32ZVE32F-NEXT: .LBB86_12: # %cond.store5 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa3, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 16 ; RV32ZVE32F-NEXT: beqz a1, .LBB86_5 ; RV32ZVE32F-NEXT: .LBB86_13: # %cond.store7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa4, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 32 ; RV32ZVE32F-NEXT: beqz a1, .LBB86_6 ; RV32ZVE32F-NEXT: .LBB86_14: # %cond.store9 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa5, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 64 ; RV32ZVE32F-NEXT: beqz a1, .LBB86_7 ; RV32ZVE32F-NEXT: .LBB86_15: # %cond.store11 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa6, 0(a1) ; RV32ZVE32F-NEXT: andi a0, a0, -128 ; RV32ZVE32F-NEXT: beqz a0, .LBB86_8 ; RV32ZVE32F-NEXT: .LBB86_16: # %cond.store13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a0, v8 ; RV32ZVE32F-NEXT: fsd fa7, 0(a0) @@ -9794,13 +9794,13 @@ ; RV64ZVE32F-LABEL: mscatter_baseidx_zext_v8i16_v8f64: ; RV64ZVE32F: # %bb.0: ; RV64ZVE32F-NEXT: lui a1, 16 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v0 ; RV64ZVE32F-NEXT: andi a3, a2, 1 ; RV64ZVE32F-NEXT: addiw a1, a1, -1 ; RV64ZVE32F-NEXT: beqz a3, .LBB86_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.store -; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v8 ; RV64ZVE32F-NEXT: and a3, a3, a1 ; RV64ZVE32F-NEXT: slli a3, a3, 3 @@ -9810,7 +9810,7 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 2 ; RV64ZVE32F-NEXT: beqz a3, .LBB86_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v9 ; RV64ZVE32F-NEXT: and a3, a3, a1 @@ -9818,7 +9818,7 @@ ; RV64ZVE32F-NEXT: add a3, a0, a3 ; RV64ZVE32F-NEXT: fsd fa1, 0(a3) ; RV64ZVE32F-NEXT: .LBB86_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a3, a2, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2 ; RV64ZVE32F-NEXT: beqz a3, .LBB86_6 @@ -9829,7 +9829,7 @@ ; RV64ZVE32F-NEXT: add a3, a0, a3 ; RV64ZVE32F-NEXT: fsd fa2, 0(a3) ; RV64ZVE32F-NEXT: .LBB86_6: # %else4 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma ; RV64ZVE32F-NEXT: andi a3, a2, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4 ; RV64ZVE32F-NEXT: bnez a3, .LBB86_13 @@ -9840,7 +9840,7 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 32 ; RV64ZVE32F-NEXT: beqz a3, .LBB86_10 ; RV64ZVE32F-NEXT: .LBB86_9: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, 
mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v9 ; RV64ZVE32F-NEXT: and a3, a3, a1 @@ -9848,7 +9848,7 @@ ; RV64ZVE32F-NEXT: add a3, a0, a3 ; RV64ZVE32F-NEXT: fsd fa5, 0(a3) ; RV64ZVE32F-NEXT: .LBB86_10: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a3, a2, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a3, .LBB86_15 @@ -9858,7 +9858,7 @@ ; RV64ZVE32F-NEXT: .LBB86_12: # %else14 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB86_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a3, v9 ; RV64ZVE32F-NEXT: and a3, a3, a1 @@ -9868,7 +9868,7 @@ ; RV64ZVE32F-NEXT: andi a3, a2, 16 ; RV64ZVE32F-NEXT: beqz a3, .LBB86_8 ; RV64ZVE32F-NEXT: .LBB86_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v8 ; RV64ZVE32F-NEXT: and a3, a3, a1 ; RV64ZVE32F-NEXT: slli a3, a3, 3 @@ -9886,7 +9886,7 @@ ; RV64ZVE32F-NEXT: andi a2, a2, -128 ; RV64ZVE32F-NEXT: beqz a2, .LBB86_12 ; RV64ZVE32F-NEXT: .LBB86_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: and a1, a2, a1 @@ -9903,15 +9903,15 @@ define void @mscatter_baseidx_v8i32_v8f64(<8 x double> %val, double* %base, <8 x i32> %idxs, <8 x i1> %m) { ; RV32V-LABEL: mscatter_baseidx_v8i32_v8f64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vsll.vi v12, v12, 3 -; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_v8i32_v8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf2 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t @@ -9919,10 +9919,10 @@ ; ; RV32ZVE32F-LABEL: mscatter_baseidx_v8i32_v8f64: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a0, v0 ; RV32ZVE32F-NEXT: andi a1, a0, 1 ; RV32ZVE32F-NEXT: bnez a1, .LBB87_9 @@ -9950,55 +9950,55 @@ ; RV32ZVE32F-NEXT: .LBB87_8: # %else14 ; RV32ZVE32F-NEXT: ret ; RV32ZVE32F-NEXT: .LBB87_9: # %cond.store -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: fsd fa0, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 2 ; RV32ZVE32F-NEXT: beqz a1, .LBB87_2 ; RV32ZVE32F-NEXT: .LBB87_10: # %cond.store1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa1, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 4 ; RV32ZVE32F-NEXT: beqz a1, .LBB87_3 ; RV32ZVE32F-NEXT: .LBB87_11: # 
%cond.store3 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa2, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 8 ; RV32ZVE32F-NEXT: beqz a1, .LBB87_4 ; RV32ZVE32F-NEXT: .LBB87_12: # %cond.store5 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa3, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 16 ; RV32ZVE32F-NEXT: beqz a1, .LBB87_5 ; RV32ZVE32F-NEXT: .LBB87_13: # %cond.store7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa4, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 32 ; RV32ZVE32F-NEXT: beqz a1, .LBB87_6 ; RV32ZVE32F-NEXT: .LBB87_14: # %cond.store9 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa5, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 64 ; RV32ZVE32F-NEXT: beqz a1, .LBB87_7 ; RV32ZVE32F-NEXT: .LBB87_15: # %cond.store11 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa6, 0(a1) ; RV32ZVE32F-NEXT: andi a0, a0, -128 ; RV32ZVE32F-NEXT: beqz a0, .LBB87_8 ; RV32ZVE32F-NEXT: .LBB87_16: # %cond.store13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a0, v8 ; RV32ZVE32F-NEXT: fsd fa7, 0(a0) @@ -10006,12 +10006,12 @@ ; ; RV64ZVE32F-LABEL: mscatter_baseidx_v8i32_v8f64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB87_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.store -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 3 ; RV64ZVE32F-NEXT: add a2, a0, a2 @@ -10020,16 +10020,16 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB87_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 3 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: fsd fa1, 0(a2) ; RV64ZVE32F-NEXT: .LBB87_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB87_12 @@ -10043,14 +10043,14 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB87_9 ; RV64ZVE32F-NEXT: .LBB87_8: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: 
vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 3 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: fsd fa5, 0(a2) ; RV64ZVE32F-NEXT: .LBB87_9: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB87_15 @@ -10067,7 +10067,7 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: beqz a2, .LBB87_6 ; RV64ZVE32F-NEXT: .LBB87_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 3 @@ -10076,7 +10076,7 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB87_7 ; RV64ZVE32F-NEXT: .LBB87_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 3 ; RV64ZVE32F-NEXT: add a2, a0, a2 @@ -10092,7 +10092,7 @@ ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB87_11 ; RV64ZVE32F-NEXT: .LBB87_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v8 ; RV64ZVE32F-NEXT: slli a1, a1, 3 @@ -10107,15 +10107,15 @@ define void @mscatter_baseidx_sext_v8i32_v8f64(<8 x double> %val, double* %base, <8 x i32> %idxs, <8 x i1> %m) { ; RV32V-LABEL: mscatter_baseidx_sext_v8i32_v8f64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vsll.vi v12, v12, 3 -; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_v8i32_v8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf2 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t @@ -10123,10 +10123,10 @@ ; ; RV32ZVE32F-LABEL: mscatter_baseidx_sext_v8i32_v8f64: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a0, v0 ; RV32ZVE32F-NEXT: andi a1, a0, 1 ; RV32ZVE32F-NEXT: bnez a1, .LBB88_9 @@ -10154,55 +10154,55 @@ ; RV32ZVE32F-NEXT: .LBB88_8: # %else14 ; RV32ZVE32F-NEXT: ret ; RV32ZVE32F-NEXT: .LBB88_9: # %cond.store -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: fsd fa0, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 2 ; RV32ZVE32F-NEXT: beqz a1, .LBB88_2 ; RV32ZVE32F-NEXT: .LBB88_10: # %cond.store1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa1, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 4 ; RV32ZVE32F-NEXT: beqz a1, .LBB88_3 ; 
RV32ZVE32F-NEXT: .LBB88_11: # %cond.store3 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa2, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 8 ; RV32ZVE32F-NEXT: beqz a1, .LBB88_4 ; RV32ZVE32F-NEXT: .LBB88_12: # %cond.store5 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa3, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 16 ; RV32ZVE32F-NEXT: beqz a1, .LBB88_5 ; RV32ZVE32F-NEXT: .LBB88_13: # %cond.store7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa4, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 32 ; RV32ZVE32F-NEXT: beqz a1, .LBB88_6 ; RV32ZVE32F-NEXT: .LBB88_14: # %cond.store9 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa5, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 64 ; RV32ZVE32F-NEXT: beqz a1, .LBB88_7 ; RV32ZVE32F-NEXT: .LBB88_15: # %cond.store11 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa6, 0(a1) ; RV32ZVE32F-NEXT: andi a0, a0, -128 ; RV32ZVE32F-NEXT: beqz a0, .LBB88_8 ; RV32ZVE32F-NEXT: .LBB88_16: # %cond.store13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a0, v8 ; RV32ZVE32F-NEXT: fsd fa7, 0(a0) @@ -10210,12 +10210,12 @@ ; ; RV64ZVE32F-LABEL: mscatter_baseidx_sext_v8i32_v8f64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB88_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.store -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 3 ; RV64ZVE32F-NEXT: add a2, a0, a2 @@ -10224,16 +10224,16 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB88_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 3 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: fsd fa1, 0(a2) ; RV64ZVE32F-NEXT: .LBB88_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB88_12 @@ -10247,14 +10247,14 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB88_9 ; RV64ZVE32F-NEXT: .LBB88_8: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, 
e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 3 ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: fsd fa5, 0(a2) ; RV64ZVE32F-NEXT: .LBB88_9: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB88_15 @@ -10271,7 +10271,7 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: beqz a2, .LBB88_6 ; RV64ZVE32F-NEXT: .LBB88_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 3 @@ -10280,7 +10280,7 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB88_7 ; RV64ZVE32F-NEXT: .LBB88_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 3 ; RV64ZVE32F-NEXT: add a2, a0, a2 @@ -10296,7 +10296,7 @@ ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB88_11 ; RV64ZVE32F-NEXT: .LBB88_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v8 ; RV64ZVE32F-NEXT: slli a1, a1, 3 @@ -10312,15 +10312,15 @@ define void @mscatter_baseidx_zext_v8i32_v8f64(<8 x double> %val, double* %base, <8 x i32> %idxs, <8 x i1> %m) { ; RV32V-LABEL: mscatter_baseidx_zext_v8i32_v8f64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vsll.vi v12, v12, 3 -; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_zext_v8i32_v8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vzext.vf2 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t @@ -10328,10 +10328,10 @@ ; ; RV32ZVE32F-LABEL: mscatter_baseidx_zext_v8i32_v8f64: ; RV32ZVE32F: # %bb.0: -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a0, v0 ; RV32ZVE32F-NEXT: andi a1, a0, 1 ; RV32ZVE32F-NEXT: bnez a1, .LBB89_9 @@ -10359,55 +10359,55 @@ ; RV32ZVE32F-NEXT: .LBB89_8: # %else14 ; RV32ZVE32F-NEXT: ret ; RV32ZVE32F-NEXT: .LBB89_9: # %cond.store -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: fsd fa0, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 2 ; RV32ZVE32F-NEXT: beqz a1, .LBB89_2 ; RV32ZVE32F-NEXT: .LBB89_10: # %cond.store1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa1, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 4 ; 
RV32ZVE32F-NEXT: beqz a1, .LBB89_3 ; RV32ZVE32F-NEXT: .LBB89_11: # %cond.store3 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa2, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 8 ; RV32ZVE32F-NEXT: beqz a1, .LBB89_4 ; RV32ZVE32F-NEXT: .LBB89_12: # %cond.store5 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa3, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 16 ; RV32ZVE32F-NEXT: beqz a1, .LBB89_5 ; RV32ZVE32F-NEXT: .LBB89_13: # %cond.store7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa4, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 32 ; RV32ZVE32F-NEXT: beqz a1, .LBB89_6 ; RV32ZVE32F-NEXT: .LBB89_14: # %cond.store9 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa5, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 64 ; RV32ZVE32F-NEXT: beqz a1, .LBB89_7 ; RV32ZVE32F-NEXT: .LBB89_15: # %cond.store11 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa6, 0(a1) ; RV32ZVE32F-NEXT: andi a0, a0, -128 ; RV32ZVE32F-NEXT: beqz a0, .LBB89_8 ; RV32ZVE32F-NEXT: .LBB89_16: # %cond.store13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a0, v8 ; RV32ZVE32F-NEXT: fsd fa7, 0(a0) @@ -10415,12 +10415,12 @@ ; ; RV64ZVE32F-LABEL: mscatter_baseidx_zext_v8i32_v8f64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB89_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.store -; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 32 ; RV64ZVE32F-NEXT: srli a2, a2, 29 @@ -10430,7 +10430,7 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB89_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 32 @@ -10438,9 +10438,9 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: fsd fa1, 0(a2) ; RV64ZVE32F-NEXT: .LBB89_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB89_12 @@ -10454,7 +10454,7 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB89_9 ; RV64ZVE32F-NEXT: 
.LBB89_8: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 32 @@ -10462,7 +10462,7 @@ ; RV64ZVE32F-NEXT: add a2, a0, a2 ; RV64ZVE32F-NEXT: fsd fa5, 0(a2) ; RV64ZVE32F-NEXT: .LBB89_9: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB89_15 @@ -10480,7 +10480,7 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: beqz a2, .LBB89_6 ; RV64ZVE32F-NEXT: .LBB89_13: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v8 ; RV64ZVE32F-NEXT: slli a2, a2, 32 @@ -10490,7 +10490,7 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB89_7 ; RV64ZVE32F-NEXT: .LBB89_14: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: slli a2, a2, 32 ; RV64ZVE32F-NEXT: srli a2, a2, 29 @@ -10508,7 +10508,7 @@ ; RV64ZVE32F-NEXT: andi a1, a1, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB89_11 ; RV64ZVE32F-NEXT: .LBB89_16: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v8 ; RV64ZVE32F-NEXT: slli a1, a1, 32 @@ -10525,16 +10525,16 @@ define void @mscatter_baseidx_v8f64(<8 x double> %val, double* %base, <8 x i64> %idxs, <8 x i1> %m) { ; RV32V-LABEL: mscatter_baseidx_v8f64: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32V-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32V-NEXT: vnsrl.wi v16, v12, 0 ; RV32V-NEXT: vsll.vi v12, v16, 3 -; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32V-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; RV32V-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32V-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_v8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsll.vi v12, v12, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret @@ -10567,11 +10567,11 @@ ; RV32ZVE32F-NEXT: sw a3, 4(sp) ; RV32ZVE32F-NEXT: sw a2, 0(sp) ; RV32ZVE32F-NEXT: mv a1, sp -; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vle32.v v8, (a1) ; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3 ; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0 -; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a0, v0 ; RV32ZVE32F-NEXT: andi a1, a0, 1 ; RV32ZVE32F-NEXT: bnez a1, .LBB90_10 @@ -10597,7 +10597,7 @@ ; RV32ZVE32F-NEXT: andi a0, a0, -128 ; RV32ZVE32F-NEXT: beqz a0, .LBB90_9 ; RV32ZVE32F-NEXT: .LBB90_8: # %cond.store13 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7 ; RV32ZVE32F-NEXT: vmv.x.s a0, v8 ; RV32ZVE32F-NEXT: fsd fa7, 0(a0) @@ -10608,48 +10608,48 @@ ; RV32ZVE32F-NEXT: addi sp, sp, 64 ; RV32ZVE32F-NEXT: ret ; RV32ZVE32F-NEXT: .LBB90_10: # %cond.store -; RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; 
RV32ZVE32F-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vmv.x.s a1, v8 ; RV32ZVE32F-NEXT: fsd fa0, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 2 ; RV32ZVE32F-NEXT: beqz a1, .LBB90_2 ; RV32ZVE32F-NEXT: .LBB90_11: # %cond.store1 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa1, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 4 ; RV32ZVE32F-NEXT: beqz a1, .LBB90_3 ; RV32ZVE32F-NEXT: .LBB90_12: # %cond.store3 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa2, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 8 ; RV32ZVE32F-NEXT: beqz a1, .LBB90_4 ; RV32ZVE32F-NEXT: .LBB90_13: # %cond.store5 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa3, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 16 ; RV32ZVE32F-NEXT: beqz a1, .LBB90_5 ; RV32ZVE32F-NEXT: .LBB90_14: # %cond.store7 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa4, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 32 ; RV32ZVE32F-NEXT: beqz a1, .LBB90_6 ; RV32ZVE32F-NEXT: .LBB90_15: # %cond.store9 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa5, 0(a1) ; RV32ZVE32F-NEXT: andi a1, a0, 64 ; RV32ZVE32F-NEXT: beqz a1, .LBB90_7 ; RV32ZVE32F-NEXT: .LBB90_16: # %cond.store11 -; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6 ; RV32ZVE32F-NEXT: vmv.x.s a1, v10 ; RV32ZVE32F-NEXT: fsd fa6, 0(a1) @@ -10666,7 +10666,7 @@ ; RV64ZVE32F-NEXT: ld a5, 40(a1) ; RV64ZVE32F-NEXT: ld a4, 48(a1) ; RV64ZVE32F-NEXT: ld a2, 56(a1) -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v0 ; RV64ZVE32F-NEXT: andi t2, a3, 1 ; RV64ZVE32F-NEXT: bnez t2, .LBB90_9 @@ -10751,56 +10751,56 @@ define void @mscatter_baseidx_v16i8(<16 x i8> %val, i8* %base, <16 x i8> %idxs, <16 x i1> %m) { ; RV32-LABEL: mscatter_baseidx_v16i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v9 -; RV32-NEXT: vsetvli zero, zero, e8, m1, ta, mu +; RV32-NEXT: vsetvli zero, zero, e8, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_v16i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v9 -; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu +; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_baseidx_v16i8: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; 
RV64ZVE32F-NEXT: beqz a2, .LBB91_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.store -; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vse8.v v8, (a2) ; RV64ZVE32F-NEXT: .LBB91_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB91_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1 ; RV64ZVE32F-NEXT: vse8.v v10, (a2) ; RV64ZVE32F-NEXT: .LBB91_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v11, v9, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB91_6 ; RV64ZVE32F-NEXT: # %bb.5: # %cond.store3 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 2 ; RV64ZVE32F-NEXT: vse8.v v10, (a2) ; RV64ZVE32F-NEXT: .LBB91_6: # %else4 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB91_26 @@ -10811,17 +10811,17 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB91_10 ; RV64ZVE32F-NEXT: .LBB91_9: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 5 ; RV64ZVE32F-NEXT: vse8.v v11, (a2) ; RV64ZVE32F-NEXT: .LBB91_10: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 8 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB91_28 @@ -10835,26 +10835,26 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 512 ; RV64ZVE32F-NEXT: beqz a2, .LBB91_15 ; RV64ZVE32F-NEXT: .LBB91_14: # %cond.store17 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 9 ; RV64ZVE32F-NEXT: vse8.v v10, (a2) ; RV64ZVE32F-NEXT: .LBB91_15: # %else18 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 1024 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 2 ; RV64ZVE32F-NEXT: beqz a2, 
.LBB91_17 ; RV64ZVE32F-NEXT: # %bb.16: # %cond.store19 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 10 ; RV64ZVE32F-NEXT: vse8.v v11, (a2) ; RV64ZVE32F-NEXT: .LBB91_17: # %else20 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: slli a2, a1, 52 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 4 ; RV64ZVE32F-NEXT: bltz a2, .LBB91_31 @@ -10865,22 +10865,22 @@ ; RV64ZVE32F-NEXT: slli a2, a1, 50 ; RV64ZVE32F-NEXT: bgez a2, .LBB91_21 ; RV64ZVE32F-NEXT: .LBB91_20: # %cond.store25 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 13 ; RV64ZVE32F-NEXT: vse8.v v10, (a2) ; RV64ZVE32F-NEXT: .LBB91_21: # %else26 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: slli a2, a1, 49 ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 2 ; RV64ZVE32F-NEXT: bgez a2, .LBB91_23 ; RV64ZVE32F-NEXT: # %bb.22: # %cond.store27 ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 14 ; RV64ZVE32F-NEXT: vse8.v v10, (a2) ; RV64ZVE32F-NEXT: .LBB91_23: # %else28 @@ -10888,30 +10888,30 @@ ; RV64ZVE32F-NEXT: and a1, a1, a2 ; RV64ZVE32F-NEXT: beqz a1, .LBB91_25 ; RV64ZVE32F-NEXT: # %bb.24: # %cond.store29 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v9 ; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 15 ; RV64ZVE32F-NEXT: vse8.v v8, (a0) ; RV64ZVE32F-NEXT: .LBB91_25: # %else30 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB91_26: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v11, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 3 ; RV64ZVE32F-NEXT: vse8.v v11, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB91_8 ; RV64ZVE32F-NEXT: .LBB91_27: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 4 ; RV64ZVE32F-NEXT: vse8.v v11, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 32 @@ -10920,46 +10920,46 @@ ; RV64ZVE32F-NEXT: .LBB91_28: # %cond.store11 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, 
ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 6 ; RV64ZVE32F-NEXT: vse8.v v11, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 128 ; RV64ZVE32F-NEXT: beqz a2, .LBB91_12 ; RV64ZVE32F-NEXT: .LBB91_29: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 7 ; RV64ZVE32F-NEXT: vse8.v v10, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 256 ; RV64ZVE32F-NEXT: beqz a2, .LBB91_13 ; RV64ZVE32F-NEXT: .LBB91_30: # %cond.store15 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 8 ; RV64ZVE32F-NEXT: vse8.v v10, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 512 ; RV64ZVE32F-NEXT: bnez a2, .LBB91_14 ; RV64ZVE32F-NEXT: j .LBB91_15 ; RV64ZVE32F-NEXT: .LBB91_31: # %cond.store21 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 11 ; RV64ZVE32F-NEXT: vse8.v v10, (a2) ; RV64ZVE32F-NEXT: slli a2, a1, 51 ; RV64ZVE32F-NEXT: bgez a2, .LBB91_19 ; RV64ZVE32F-NEXT: .LBB91_32: # %cond.store23 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v9 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 12 ; RV64ZVE32F-NEXT: vse8.v v10, (a2) ; RV64ZVE32F-NEXT: slli a2, a1, 50 @@ -10976,65 +10976,65 @@ ; RV32-LABEL: mscatter_baseidx_v32i8: ; RV32: # %bb.0: ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; RV32-NEXT: vsext.vf4 v16, v10 -; RV32-NEXT: vsetvli zero, zero, e8, m2, ta, mu +; RV32-NEXT: vsetvli zero, zero, e8, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_v32i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v10 -; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu +; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; RV64-NEXT: vsetivli zero, 16, e8, m2, ta, mu +; RV64-NEXT: vsetivli zero, 16, e8, m2, ta, ma ; RV64-NEXT: vslidedown.vi v8, v8, 16 ; RV64-NEXT: vslidedown.vi v10, v10, 16 -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v10 -; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64-NEXT: vslidedown.vi v0, v0, 2 -; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret ; ; RV64ZVE32F-LABEL: mscatter_baseidx_v32i8: ; RV64ZVE32F: # %bb.0: 
-; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a1, v0 ; RV64ZVE32F-NEXT: andi a2, a1, 1 ; RV64ZVE32F-NEXT: beqz a2, .LBB92_2 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.store -; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vse8.v v8, (a2) ; RV64ZVE32F-NEXT: .LBB92_2: # %else ; RV64ZVE32F-NEXT: andi a2, a1, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB92_4 ; RV64ZVE32F-NEXT: # %bb.3: # %cond.store1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v12 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1 ; RV64ZVE32F-NEXT: vse8.v v12, (a2) ; RV64ZVE32F-NEXT: .LBB92_4: # %else2 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 4 ; RV64ZVE32F-NEXT: vslidedown.vi v12, v10, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB92_6 ; RV64ZVE32F-NEXT: # %bb.5: # %cond.store3 ; RV64ZVE32F-NEXT: vmv.x.s a2, v12 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 2 ; RV64ZVE32F-NEXT: vse8.v v14, (a2) ; RV64ZVE32F-NEXT: .LBB92_6: # %else4 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 8 ; RV64ZVE32F-NEXT: vslidedown.vi v13, v10, 4 ; RV64ZVE32F-NEXT: bnez a2, .LBB92_50 @@ -11045,17 +11045,17 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 32 ; RV64ZVE32F-NEXT: beqz a2, .LBB92_10 ; RV64ZVE32F-NEXT: .LBB92_9: # %cond.store9 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v13, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v12 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 5 ; RV64ZVE32F-NEXT: vse8.v v14, (a2) ; RV64ZVE32F-NEXT: .LBB92_10: # %else10 -; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v10, 8 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 64 ; RV64ZVE32F-NEXT: vslidedown.vi v13, v13, 2 ; RV64ZVE32F-NEXT: bnez a2, .LBB92_52 @@ -11069,61 +11069,61 @@ ; RV64ZVE32F-NEXT: andi a2, a1, 512 ; RV64ZVE32F-NEXT: beqz a2, .LBB92_15 ; RV64ZVE32F-NEXT: .LBB92_14: # %cond.store17 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v13, v12, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v13 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 9 ; RV64ZVE32F-NEXT: vse8.v v14, (a2) ; RV64ZVE32F-NEXT: .LBB92_15: # %else18 -; RV64ZVE32F-NEXT: vsetivli 
zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: andi a2, a1, 1024 ; RV64ZVE32F-NEXT: vslidedown.vi v13, v12, 2 ; RV64ZVE32F-NEXT: beqz a2, .LBB92_17 ; RV64ZVE32F-NEXT: # %bb.16: # %cond.store19 ; RV64ZVE32F-NEXT: vmv.x.s a2, v13 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 10 ; RV64ZVE32F-NEXT: vse8.v v14, (a2) ; RV64ZVE32F-NEXT: .LBB92_17: # %else20 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: slli a2, a1, 52 ; RV64ZVE32F-NEXT: vslidedown.vi v12, v12, 4 ; RV64ZVE32F-NEXT: bgez a2, .LBB92_19 ; RV64ZVE32F-NEXT: # %bb.18: # %cond.store21 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v13, v13, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v13 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 11 ; RV64ZVE32F-NEXT: vse8.v v14, (a2) ; RV64ZVE32F-NEXT: .LBB92_19: # %else22 -; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m2, ta, ma ; RV64ZVE32F-NEXT: slli a2, a1, 51 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 16 ; RV64ZVE32F-NEXT: bgez a2, .LBB92_21 ; RV64ZVE32F-NEXT: # %bb.20: # %cond.store23 ; RV64ZVE32F-NEXT: vmv.x.s a2, v12 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 12 ; RV64ZVE32F-NEXT: vse8.v v14, (a2) ; RV64ZVE32F-NEXT: .LBB92_21: # %else24 ; RV64ZVE32F-NEXT: slli a2, a1, 50 ; RV64ZVE32F-NEXT: bgez a2, .LBB92_23 ; RV64ZVE32F-NEXT: # %bb.22: # %cond.store25 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v13, v12, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v13 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 13 ; RV64ZVE32F-NEXT: vse8.v v14, (a2) ; RV64ZVE32F-NEXT: .LBB92_23: # %else26 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: slli a2, a1, 49 ; RV64ZVE32F-NEXT: vslidedown.vi v12, v12, 2 ; RV64ZVE32F-NEXT: bltz a2, .LBB92_55 @@ -11137,26 +11137,26 @@ ; RV64ZVE32F-NEXT: slli a2, a1, 46 ; RV64ZVE32F-NEXT: bgez a2, .LBB92_28 ; RV64ZVE32F-NEXT: .LBB92_27: # %cond.store33 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v12 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 17 ; RV64ZVE32F-NEXT: vse8.v v12, (a2) ; RV64ZVE32F-NEXT: .LBB92_28: # %else34 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: slli a2, a1, 45 ; RV64ZVE32F-NEXT: vslidedown.vi v13, v10, 2 ; RV64ZVE32F-NEXT: bgez a2, .LBB92_30 ; RV64ZVE32F-NEXT: # %bb.29: # %cond.store35 ; RV64ZVE32F-NEXT: vmv.x.s a2, v13 ; RV64ZVE32F-NEXT: add a2, a0, a2 
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 18 ; RV64ZVE32F-NEXT: vse8.v v14, (a2) ; RV64ZVE32F-NEXT: .LBB92_30: # %else36 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: slli a2, a1, 44 ; RV64ZVE32F-NEXT: vslidedown.vi v12, v10, 4 ; RV64ZVE32F-NEXT: bltz a2, .LBB92_58 @@ -11167,17 +11167,17 @@ ; RV64ZVE32F-NEXT: slli a2, a1, 42 ; RV64ZVE32F-NEXT: bgez a2, .LBB92_34 ; RV64ZVE32F-NEXT: .LBB92_33: # %cond.store41 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v13, v12, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v13 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 21 ; RV64ZVE32F-NEXT: vse8.v v14, (a2) ; RV64ZVE32F-NEXT: .LBB92_34: # %else42 -; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 8 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: slli a2, a1, 41 ; RV64ZVE32F-NEXT: vslidedown.vi v11, v12, 2 ; RV64ZVE32F-NEXT: bltz a2, .LBB92_60 @@ -11191,26 +11191,26 @@ ; RV64ZVE32F-NEXT: slli a2, a1, 38 ; RV64ZVE32F-NEXT: bgez a2, .LBB92_39 ; RV64ZVE32F-NEXT: .LBB92_38: # %cond.store49 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 25 ; RV64ZVE32F-NEXT: vse8.v v12, (a2) ; RV64ZVE32F-NEXT: .LBB92_39: # %else50 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: slli a2, a1, 37 ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 2 ; RV64ZVE32F-NEXT: bgez a2, .LBB92_41 ; RV64ZVE32F-NEXT: # %bb.40: # %cond.store51 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 26 ; RV64ZVE32F-NEXT: vse8.v v12, (a2) ; RV64ZVE32F-NEXT: .LBB92_41: # %else52 -; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; RV64ZVE32F-NEXT: slli a2, a1, 36 ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 4 ; RV64ZVE32F-NEXT: bltz a2, .LBB92_63 @@ -11221,22 +11221,22 @@ ; RV64ZVE32F-NEXT: slli a2, a1, 34 ; RV64ZVE32F-NEXT: bgez a2, .LBB92_45 ; RV64ZVE32F-NEXT: .LBB92_44: # %cond.store57 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 29 ; RV64ZVE32F-NEXT: vse8.v v12, (a2) ; RV64ZVE32F-NEXT: .LBB92_45: # %else58 -; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: slli a2, a1, 33 ; 
RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2 ; RV64ZVE32F-NEXT: bgez a2, .LBB92_47 ; RV64ZVE32F-NEXT: # %bb.46: # %cond.store59 ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 30 ; RV64ZVE32F-NEXT: vse8.v v12, (a2) ; RV64ZVE32F-NEXT: .LBB92_47: # %else60 @@ -11244,30 +11244,30 @@ ; RV64ZVE32F-NEXT: and a1, a1, a2 ; RV64ZVE32F-NEXT: beqz a1, .LBB92_49 ; RV64ZVE32F-NEXT: # %bb.48: # %cond.store61 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1 ; RV64ZVE32F-NEXT: vmv.x.s a1, v10 ; RV64ZVE32F-NEXT: add a0, a0, a1 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 31 ; RV64ZVE32F-NEXT: vse8.v v8, (a0) ; RV64ZVE32F-NEXT: .LBB92_49: # %else62 ; RV64ZVE32F-NEXT: ret ; RV64ZVE32F-NEXT: .LBB92_50: # %cond.store5 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v12, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v12 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 3 ; RV64ZVE32F-NEXT: vse8.v v14, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 16 ; RV64ZVE32F-NEXT: beqz a2, .LBB92_8 ; RV64ZVE32F-NEXT: .LBB92_51: # %cond.store7 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v13 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 4 ; RV64ZVE32F-NEXT: vse8.v v14, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 32 @@ -11276,26 +11276,26 @@ ; RV64ZVE32F-NEXT: .LBB92_52: # %cond.store11 ; RV64ZVE32F-NEXT: vmv.x.s a2, v13 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 6 ; RV64ZVE32F-NEXT: vse8.v v14, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 128 ; RV64ZVE32F-NEXT: beqz a2, .LBB92_12 ; RV64ZVE32F-NEXT: .LBB92_53: # %cond.store13 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v13, v13, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v13 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 7 ; RV64ZVE32F-NEXT: vse8.v v14, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 256 ; RV64ZVE32F-NEXT: beqz a2, .LBB92_13 ; RV64ZVE32F-NEXT: .LBB92_54: # %cond.store15 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v12 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 8 ; RV64ZVE32F-NEXT: vse8.v v14, (a2) ; RV64ZVE32F-NEXT: andi a2, a1, 512 @@ -11304,46 +11304,46 @@ ; RV64ZVE32F-NEXT: .LBB92_55: # %cond.store27 ; RV64ZVE32F-NEXT: vmv.x.s a2, v12 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 
1, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 14 ; RV64ZVE32F-NEXT: vse8.v v14, (a2) ; RV64ZVE32F-NEXT: slli a2, a1, 48 ; RV64ZVE32F-NEXT: bgez a2, .LBB92_25 ; RV64ZVE32F-NEXT: .LBB92_56: # %cond.store29 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v12, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v12 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 15 ; RV64ZVE32F-NEXT: vse8.v v12, (a2) ; RV64ZVE32F-NEXT: slli a2, a1, 47 ; RV64ZVE32F-NEXT: bgez a2, .LBB92_26 ; RV64ZVE32F-NEXT: .LBB92_57: # %cond.store31 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 16 ; RV64ZVE32F-NEXT: vse8.v v12, (a2) ; RV64ZVE32F-NEXT: slli a2, a1, 46 ; RV64ZVE32F-NEXT: bltz a2, .LBB92_27 ; RV64ZVE32F-NEXT: j .LBB92_28 ; RV64ZVE32F-NEXT: .LBB92_58: # %cond.store37 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v13, v13, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v13 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 19 ; RV64ZVE32F-NEXT: vse8.v v14, (a2) ; RV64ZVE32F-NEXT: slli a2, a1, 43 ; RV64ZVE32F-NEXT: bgez a2, .LBB92_32 ; RV64ZVE32F-NEXT: .LBB92_59: # %cond.store39 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v12 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 20 ; RV64ZVE32F-NEXT: vse8.v v14, (a2) ; RV64ZVE32F-NEXT: slli a2, a1, 42 @@ -11352,46 +11352,46 @@ ; RV64ZVE32F-NEXT: .LBB92_60: # %cond.store43 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 22 ; RV64ZVE32F-NEXT: vse8.v v12, (a2) ; RV64ZVE32F-NEXT: slli a2, a1, 40 ; RV64ZVE32F-NEXT: bgez a2, .LBB92_36 ; RV64ZVE32F-NEXT: .LBB92_61: # %cond.store45 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v11, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 23 ; RV64ZVE32F-NEXT: vse8.v v12, (a2) ; RV64ZVE32F-NEXT: slli a2, a1, 39 ; RV64ZVE32F-NEXT: bgez a2, .LBB92_37 ; RV64ZVE32F-NEXT: .LBB92_62: # %cond.store47 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 24 ; 
RV64ZVE32F-NEXT: vse8.v v12, (a2) ; RV64ZVE32F-NEXT: slli a2, a1, 38 ; RV64ZVE32F-NEXT: bltz a2, .LBB92_38 ; RV64ZVE32F-NEXT: j .LBB92_39 ; RV64ZVE32F-NEXT: .LBB92_63: # %cond.store53 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v11, v11, 1 ; RV64ZVE32F-NEXT: vmv.x.s a2, v11 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 27 ; RV64ZVE32F-NEXT: vse8.v v12, (a2) ; RV64ZVE32F-NEXT: slli a2, a1, 35 ; RV64ZVE32F-NEXT: bgez a2, .LBB92_43 ; RV64ZVE32F-NEXT: .LBB92_64: # %cond.store55 -; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a2, v10 ; RV64ZVE32F-NEXT: add a2, a0, a2 -; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 28 ; RV64ZVE32F-NEXT: vse8.v v12, (a2) ; RV64ZVE32F-NEXT: slli a2, a1, 34 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-fp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-fp.ll @@ -5,7 +5,7 @@ define void @masked_store_v1f16(<1 x half>* %val_ptr, <1 x half>* %a, <1 x half>* %m_ptr) nounwind { ; CHECK-LABEL: masked_store_v1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a2) ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: fmv.h.x ft0, zero @@ -23,7 +23,7 @@ define void @masked_store_v1f32(<1 x float>* %val_ptr, <1 x float>* %a, <1 x float>* %m_ptr) nounwind { ; CHECK-LABEL: masked_store_v1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a2) ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: fmv.w.x ft0, zero @@ -41,7 +41,7 @@ define void @masked_store_v1f64(<1 x double>* %val_ptr, <1 x double>* %a, <1 x double>* %m_ptr) nounwind { ; RV32-LABEL: masked_store_v1f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a2) ; RV32-NEXT: vle64.v v9, (a0) ; RV32-NEXT: fcvt.d.w ft0, zero @@ -51,7 +51,7 @@ ; ; RV64-LABEL: masked_store_v1f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vle64.v v8, (a2) ; RV64-NEXT: vle64.v v9, (a0) ; RV64-NEXT: fmv.d.x ft0, zero @@ -69,7 +69,7 @@ define void @masked_store_v2f16(<2 x half>* %val_ptr, <2 x half>* %a, <2 x half>* %m_ptr) nounwind { ; CHECK-LABEL: masked_store_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a2) ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: fmv.h.x ft0, zero @@ -87,7 +87,7 @@ define void @masked_store_v2f32(<2 x float>* %val_ptr, <2 x float>* %a, <2 x float>* %m_ptr) nounwind { ; CHECK-LABEL: masked_store_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a2) ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: fmv.w.x ft0, zero @@ -105,7 +105,7 @@ define void @masked_store_v2f64(<2 x double>* %val_ptr, <2 x 
double>* %a, <2 x double>* %m_ptr) nounwind { ; RV32-LABEL: masked_store_v2f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a2) ; RV32-NEXT: vle64.v v9, (a0) ; RV32-NEXT: fcvt.d.w ft0, zero @@ -115,7 +115,7 @@ ; ; RV64-LABEL: masked_store_v2f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vle64.v v8, (a2) ; RV64-NEXT: vle64.v v9, (a0) ; RV64-NEXT: fmv.d.x ft0, zero @@ -133,7 +133,7 @@ define void @masked_store_v4f16(<4 x half>* %val_ptr, <4 x half>* %a, <4 x half>* %m_ptr) nounwind { ; CHECK-LABEL: masked_store_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v8, (a2) ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: fmv.h.x ft0, zero @@ -151,7 +151,7 @@ define void @masked_store_v4f32(<4 x float>* %val_ptr, <4 x float>* %a, <4 x float>* %m_ptr) nounwind { ; CHECK-LABEL: masked_store_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a2) ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: fmv.w.x ft0, zero @@ -169,7 +169,7 @@ define void @masked_store_v4f64(<4 x double>* %val_ptr, <4 x double>* %a, <4 x double>* %m_ptr) nounwind { ; RV32-LABEL: masked_store_v4f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vle64.v v8, (a2) ; RV32-NEXT: vle64.v v10, (a0) ; RV32-NEXT: fcvt.d.w ft0, zero @@ -179,7 +179,7 @@ ; ; RV64-LABEL: masked_store_v4f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-NEXT: vle64.v v8, (a2) ; RV64-NEXT: vle64.v v10, (a0) ; RV64-NEXT: fmv.d.x ft0, zero @@ -197,7 +197,7 @@ define void @masked_store_v8f16(<8 x half>* %val_ptr, <8 x half>* %a, <8 x half>* %m_ptr) nounwind { ; CHECK-LABEL: masked_store_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a2) ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: fmv.h.x ft0, zero @@ -215,7 +215,7 @@ define void @masked_store_v8f32(<8 x float>* %val_ptr, <8 x float>* %a, <8 x float>* %m_ptr) nounwind { ; CHECK-LABEL: masked_store_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a2) ; CHECK-NEXT: vle32.v v10, (a0) ; CHECK-NEXT: fmv.w.x ft0, zero @@ -233,7 +233,7 @@ define void @masked_store_v8f64(<8 x double>* %val_ptr, <8 x double>* %a, <8 x double>* %m_ptr) nounwind { ; RV32-LABEL: masked_store_v8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vle64.v v8, (a2) ; RV32-NEXT: vle64.v v12, (a0) ; RV32-NEXT: fcvt.d.w ft0, zero @@ -243,7 +243,7 @@ ; ; RV64-LABEL: masked_store_v8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vle64.v v8, (a2) ; RV64-NEXT: vle64.v v12, (a0) ; RV64-NEXT: fmv.d.x ft0, zero @@ -261,7 +261,7 @@ define void @masked_store_v16f16(<16 x half>* %val_ptr, <16 x half>* %a, <16 x half>* %m_ptr) nounwind { ; CHECK-LABEL: masked_store_v16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; 
CHECK-NEXT: vle16.v v8, (a2) ; CHECK-NEXT: vle16.v v10, (a0) ; CHECK-NEXT: fmv.h.x ft0, zero @@ -279,7 +279,7 @@ define void @masked_store_v16f32(<16 x float>* %val_ptr, <16 x float>* %a, <16 x float>* %m_ptr) nounwind { ; CHECK-LABEL: masked_store_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v8, (a2) ; CHECK-NEXT: vle32.v v12, (a0) ; CHECK-NEXT: fmv.w.x ft0, zero @@ -297,7 +297,7 @@ define void @masked_store_v16f64(<16 x double>* %val_ptr, <16 x double>* %a, <16 x double>* %m_ptr) nounwind { ; RV32-LABEL: masked_store_v16f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vle64.v v8, (a2) ; RV32-NEXT: vle64.v v16, (a0) ; RV32-NEXT: fcvt.d.w ft0, zero @@ -307,7 +307,7 @@ ; ; RV64-LABEL: masked_store_v16f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vle64.v v8, (a2) ; RV64-NEXT: vle64.v v16, (a0) ; RV64-NEXT: fmv.d.x ft0, zero @@ -326,7 +326,7 @@ ; CHECK-LABEL: masked_store_v32f16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a3, 32 -; CHECK-NEXT: vsetvli zero, a3, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v8, (a2) ; CHECK-NEXT: vle16.v v12, (a0) ; CHECK-NEXT: fmv.h.x ft0, zero @@ -345,7 +345,7 @@ ; CHECK-LABEL: masked_store_v32f32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a3, 32 -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a2) ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: fmv.w.x ft0, zero @@ -367,7 +367,7 @@ ; RV32-NEXT: csrr a3, vlenb ; RV32-NEXT: slli a3, a3, 4 ; RV32-NEXT: sub sp, sp, a3 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vle64.v v8, (a2) ; RV32-NEXT: addi a2, a2, 128 ; RV32-NEXT: vle64.v v16, (a2) @@ -407,7 +407,7 @@ ; RV64-NEXT: csrr a3, vlenb ; RV64-NEXT: slli a3, a3, 4 ; RV64-NEXT: sub sp, sp, a3 -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vle64.v v8, (a2) ; RV64-NEXT: addi a2, a2, 128 ; RV64-NEXT: vle64.v v16, (a2) @@ -452,7 +452,7 @@ ; CHECK-LABEL: masked_store_v64f16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a3, 64 -; CHECK-NEXT: vsetvli zero, a3, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a2) ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: fmv.h.x ft0, zero @@ -475,7 +475,7 @@ ; CHECK-NEXT: slli a3, a3, 4 ; CHECK-NEXT: sub sp, sp, a3 ; CHECK-NEXT: li a3, 32 -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a2) ; CHECK-NEXT: addi a2, a2, 128 ; CHECK-NEXT: vle32.v v16, (a2) @@ -524,7 +524,7 @@ ; CHECK-NEXT: slli a3, a3, 4 ; CHECK-NEXT: sub sp, sp, a3 ; CHECK-NEXT: li a3, 64 -; CHECK-NEXT: vsetvli zero, a3, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a2) ; CHECK-NEXT: addi a2, a2, 128 ; CHECK-NEXT: vle16.v v16, (a2) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll @@ -5,7 +5,7 @@ define void @masked_store_v1i8(<1 x i8>* %val_ptr, <1 x i8>* %a, <1 x i8>* %m_ptr) nounwind { ; CHECK-LABEL: 
masked_store_v1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v8, (a2) ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vmseq.vi v0, v8, 0 @@ -22,7 +22,7 @@ define void @masked_store_v1i16(<1 x i16>* %val_ptr, <1 x i16>* %a, <1 x i16>* %m_ptr) nounwind { ; CHECK-LABEL: masked_store_v1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a2) ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vmseq.vi v0, v8, 0 @@ -39,7 +39,7 @@ define void @masked_store_v1i32(<1 x i32>* %val_ptr, <1 x i32>* %a, <1 x i32>* %m_ptr) nounwind { ; CHECK-LABEL: masked_store_v1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a2) ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vmseq.vi v0, v8, 0 @@ -56,7 +56,7 @@ define void @masked_store_v1i64(<1 x i64>* %val_ptr, <1 x i64>* %a, <1 x i64>* %m_ptr) nounwind { ; CHECK-LABEL: masked_store_v1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a2) ; CHECK-NEXT: vle64.v v9, (a0) ; CHECK-NEXT: vmseq.vi v0, v8, 0 @@ -73,7 +73,7 @@ define void @masked_store_v2i8(<2 x i8>* %val_ptr, <2 x i8>* %a, <2 x i8>* %m_ptr) nounwind { ; CHECK-LABEL: masked_store_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v8, (a2) ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vmseq.vi v0, v8, 0 @@ -90,7 +90,7 @@ define void @masked_store_v2i16(<2 x i16>* %val_ptr, <2 x i16>* %a, <2 x i16>* %m_ptr) nounwind { ; CHECK-LABEL: masked_store_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a2) ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vmseq.vi v0, v8, 0 @@ -107,7 +107,7 @@ define void @masked_store_v2i32(<2 x i32>* %val_ptr, <2 x i32>* %a, <2 x i32>* %m_ptr) nounwind { ; CHECK-LABEL: masked_store_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a2) ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vmseq.vi v0, v8, 0 @@ -124,7 +124,7 @@ define void @masked_store_v2i64(<2 x i64>* %val_ptr, <2 x i64>* %a, <2 x i64>* %m_ptr) nounwind { ; CHECK-LABEL: masked_store_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a2) ; CHECK-NEXT: vle64.v v9, (a0) ; CHECK-NEXT: vmseq.vi v0, v8, 0 @@ -141,7 +141,7 @@ define void @masked_store_v4i8(<4 x i8>* %val_ptr, <4 x i8>* %a, <4 x i8>* %m_ptr) nounwind { ; CHECK-LABEL: masked_store_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v8, (a2) ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vmseq.vi v0, v8, 0 @@ -158,7 +158,7 @@ define void @masked_store_v4i16(<4 x i16>* %val_ptr, <4 x i16>* %a, <4 x i16>* %m_ptr) nounwind { ; CHECK-LABEL: masked_store_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v8, (a2) ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vmseq.vi v0, v8, 0 @@ -175,7 +175,7 @@ define void @masked_store_v4i32(<4 x i32>* 
%val_ptr, <4 x i32>* %a, <4 x i32>* %m_ptr) nounwind { ; CHECK-LABEL: masked_store_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a2) ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vmseq.vi v0, v8, 0 @@ -192,7 +192,7 @@ define void @masked_store_v4i64(<4 x i64>* %val_ptr, <4 x i64>* %a, <4 x i64>* %m_ptr) nounwind { ; CHECK-LABEL: masked_store_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vle64.v v8, (a2) ; CHECK-NEXT: vle64.v v10, (a0) ; CHECK-NEXT: vmseq.vi v0, v8, 0 @@ -209,7 +209,7 @@ define void @masked_store_v8i8(<8 x i8>* %val_ptr, <8 x i8>* %a, <8 x i8>* %m_ptr) nounwind { ; CHECK-LABEL: masked_store_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v8, (a2) ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vmseq.vi v0, v8, 0 @@ -226,7 +226,7 @@ define void @masked_store_v8i16(<8 x i16>* %val_ptr, <8 x i16>* %a, <8 x i16>* %m_ptr) nounwind { ; CHECK-LABEL: masked_store_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a2) ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vmseq.vi v0, v8, 0 @@ -243,7 +243,7 @@ define void @masked_store_v8i32(<8 x i32>* %val_ptr, <8 x i32>* %a, <8 x i32>* %m_ptr) nounwind { ; CHECK-LABEL: masked_store_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a2) ; CHECK-NEXT: vle32.v v10, (a0) ; CHECK-NEXT: vmseq.vi v0, v8, 0 @@ -260,7 +260,7 @@ define void @masked_store_v8i64(<8 x i64>* %val_ptr, <8 x i64>* %a, <8 x i64>* %m_ptr) nounwind { ; CHECK-LABEL: masked_store_v8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; CHECK-NEXT: vle64.v v8, (a2) ; CHECK-NEXT: vle64.v v12, (a0) ; CHECK-NEXT: vmseq.vi v0, v8, 0 @@ -277,7 +277,7 @@ define void @masked_store_v16i8(<16 x i8>* %val_ptr, <16 x i8>* %a, <16 x i8>* %m_ptr) nounwind { ; CHECK-LABEL: masked_store_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a2) ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vmseq.vi v0, v8, 0 @@ -294,7 +294,7 @@ define void @masked_store_v16i16(<16 x i16>* %val_ptr, <16 x i16>* %a, <16 x i16>* %m_ptr) nounwind { ; CHECK-LABEL: masked_store_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v8, (a2) ; CHECK-NEXT: vle16.v v10, (a0) ; CHECK-NEXT: vmseq.vi v0, v8, 0 @@ -311,7 +311,7 @@ define void @masked_store_v16i32(<16 x i32>* %val_ptr, <16 x i32>* %a, <16 x i32>* %m_ptr) nounwind { ; CHECK-LABEL: masked_store_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v8, (a2) ; CHECK-NEXT: vle32.v v12, (a0) ; CHECK-NEXT: vmseq.vi v0, v8, 0 @@ -328,7 +328,7 @@ define void @masked_store_v16i64(<16 x i64>* %val_ptr, <16 x i64>* %a, <16 x i64>* %m_ptr) nounwind { ; CHECK-LABEL: masked_store_v16i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v8, (a2) ; CHECK-NEXT: vle64.v 
v16, (a0) ; CHECK-NEXT: vmseq.vi v0, v8, 0 @@ -346,7 +346,7 @@ ; CHECK-LABEL: masked_store_v32i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a3, 32 -; CHECK-NEXT: vsetvli zero, a3, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e8, m2, ta, ma ; CHECK-NEXT: vle8.v v8, (a2) ; CHECK-NEXT: vle8.v v10, (a0) ; CHECK-NEXT: vmseq.vi v0, v8, 0 @@ -364,7 +364,7 @@ ; CHECK-LABEL: masked_store_v32i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a3, 32 -; CHECK-NEXT: vsetvli zero, a3, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v8, (a2) ; CHECK-NEXT: vle16.v v12, (a0) ; CHECK-NEXT: vmseq.vi v0, v8, 0 @@ -382,7 +382,7 @@ ; CHECK-LABEL: masked_store_v32i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a3, 32 -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a2) ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vmseq.vi v0, v8, 0 @@ -404,7 +404,7 @@ ; RV32-NEXT: slli a3, a3, 4 ; RV32-NEXT: sub sp, sp, a3 ; RV32-NEXT: addi a3, a2, 128 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vle64.v v8, (a3) ; RV32-NEXT: csrr a3, vlenb ; RV32-NEXT: slli a3, a3, 3 @@ -413,9 +413,9 @@ ; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill ; RV32-NEXT: vle64.v v24, (a2) ; RV32-NEXT: li a2, 32 -; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: vmv.v.i v8, 0 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vmseq.vv v1, v24, v8 ; RV32-NEXT: addi a2, a0, 128 ; RV32-NEXT: vle64.v v24, (a2) @@ -446,7 +446,7 @@ ; RV64-NEXT: csrr a3, vlenb ; RV64-NEXT: slli a3, a3, 4 ; RV64-NEXT: sub sp, sp, a3 -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vle64.v v8, (a2) ; RV64-NEXT: addi a2, a2, 128 ; RV64-NEXT: vle64.v v16, (a2) @@ -490,7 +490,7 @@ ; CHECK-LABEL: masked_store_v64i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a3, 64 -; CHECK-NEXT: vsetvli zero, a3, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e8, m4, ta, ma ; CHECK-NEXT: vle8.v v8, (a2) ; CHECK-NEXT: vle8.v v12, (a0) ; CHECK-NEXT: vmseq.vi v0, v8, 0 @@ -508,7 +508,7 @@ ; CHECK-LABEL: masked_store_v64i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a3, 64 -; CHECK-NEXT: vsetvli zero, a3, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a2) ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vmseq.vi v0, v8, 0 @@ -530,7 +530,7 @@ ; CHECK-NEXT: slli a3, a3, 4 ; CHECK-NEXT: sub sp, sp, a3 ; CHECK-NEXT: li a3, 32 -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a2) ; CHECK-NEXT: addi a2, a2, 128 ; CHECK-NEXT: vle32.v v16, (a2) @@ -574,7 +574,7 @@ ; CHECK-LABEL: masked_store_v128i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a3, 128 -; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v8, (a2) ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vmseq.vi v0, v8, 0 @@ -596,7 +596,7 @@ ; CHECK-NEXT: slli a3, a3, 4 ; CHECK-NEXT: sub sp, sp, a3 ; CHECK-NEXT: li a3, 64 -; CHECK-NEXT: vsetvli zero, a3, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a2) ; CHECK-NEXT: addi a2, a2, 128 ; CHECK-NEXT: vle16.v v16, (a2) @@ -644,7 +644,7 @@ ; CHECK-NEXT: slli a3, a3, 4 ; CHECK-NEXT: sub sp, sp, a3 ; CHECK-NEXT: li a3, 128 -; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu +; 
CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v8, (a2) ; CHECK-NEXT: addi a2, a2, 128 ; CHECK-NEXT: vle8.v v16, (a2) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-peephole-vmerge-vops.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-peephole-vmerge-vops.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-peephole-vmerge-vops.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-peephole-vmerge-vops.ll @@ -26,7 +26,7 @@ define <8 x i32> @vpmerge_vpadd2(<8 x i32> %passthru, <8 x i32> %x, <8 x i32> %y, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpadd2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmseq.vv v0, v9, v10 ; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t @@ -43,7 +43,7 @@ define <8 x i32> @vpmerge_vpadd3(<8 x i32> %passthru, <8 x i32> %x, <8 x i32> %y, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpadd3: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vadd.vv v8, v9, v10 ; CHECK-NEXT: ret %splat = insertelement <8 x i1> poison, i1 -1, i32 0 @@ -177,7 +177,7 @@ define <8 x i32> @vpmerge_vpload2(<8 x i32> %passthru, <8 x i32> * %p, <8 x i32> %x, <8 x i32> %y, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpload2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmseq.vv v0, v9, v10 ; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vle32.v v8, (a0), v0.t @@ -229,7 +229,7 @@ define <8 x i32> @vpselect_vpadd3(<8 x i32> %passthru, <8 x i32> %x, <8 x i32> %y, i32 zeroext %vl) { ; CHECK-LABEL: vpselect_vpadd3: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vadd.vv v8, v9, v10 ; CHECK-NEXT: ret %splat = insertelement <8 x i1> poison, i1 -1, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll @@ -9,9 +9,9 @@ define half @vpreduce_fadd_v2f16(half %s, <2 x half> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 ; CHECK-NEXT: ret @@ -22,9 +22,9 @@ define half @vpreduce_ord_fadd_v2f16(half %s, <2 x half> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_ord_fadd_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 ; CHECK-NEXT: ret @@ -37,9 +37,9 @@ define half @vpreduce_fadd_v4f16(half %s, <4 x half> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, 
tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 ; CHECK-NEXT: ret @@ -50,9 +50,9 @@ define half @vpreduce_ord_fadd_v4f16(half %s, <4 x half> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_ord_fadd_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 ; CHECK-NEXT: ret @@ -65,9 +65,9 @@ define float @vpreduce_fadd_v2f32(float %s, <2 x float> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 ; CHECK-NEXT: ret @@ -78,9 +78,9 @@ define float @vpreduce_ord_fadd_v2f32(float %s, <2 x float> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_ord_fadd_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 ; CHECK-NEXT: ret @@ -93,9 +93,9 @@ define float @vpreduce_fadd_v4f32(float %s, <4 x float> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 ; CHECK-NEXT: ret @@ -106,9 +106,9 @@ define float @vpreduce_ord_fadd_v4f32(float %s, <4 x float> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_ord_fadd_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 ; CHECK-NEXT: ret @@ -127,21 +127,21 @@ ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a1, a2 ; CHECK-NEXT: .LBB8_2: -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vslidedown.vi v24, v0, 4 ; CHECK-NEXT: bltu a0, a2, .LBB8_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: li a0, 32 ; CHECK-NEXT: .LBB8_4: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v25, fa0 -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vfredusum.vs v25, v8, v25, v0.t ; CHECK-NEXT: vfmv.f.s ft0, v25 -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v8, ft0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vfredusum.vs 
v8, v16, v8, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v8 @@ -159,21 +159,21 @@ ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a1, a2 ; CHECK-NEXT: .LBB9_2: -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; CHECK-NEXT: li a2, 32 ; CHECK-NEXT: vslidedown.vi v24, v0, 4 ; CHECK-NEXT: bltu a0, a2, .LBB9_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: li a0, 32 ; CHECK-NEXT: .LBB9_4: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v25, fa0 -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vfredosum.vs v25, v8, v25, v0.t ; CHECK-NEXT: vfmv.f.s ft0, v25 -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v8, ft0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vfredosum.vs v8, v16, v8, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v8 @@ -187,9 +187,9 @@ define double @vpreduce_fadd_v2f64(double %s, <2 x double> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 ; CHECK-NEXT: ret @@ -200,9 +200,9 @@ define double @vpreduce_ord_fadd_v2f64(double %s, <2 x double> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_ord_fadd_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 ; CHECK-NEXT: ret @@ -215,9 +215,9 @@ define double @vpreduce_fadd_v3f64(double %s, <3 x double> %v, <3 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_v3f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfredusum.vs v10, v8, v10, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v10 ; CHECK-NEXT: ret @@ -228,9 +228,9 @@ define double @vpreduce_ord_fadd_v3f64(double %s, <3 x double> %v, <3 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_ord_fadd_v3f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfredosum.vs v10, v8, v10, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v10 ; CHECK-NEXT: ret @@ -243,9 +243,9 @@ define double @vpreduce_fadd_v4f64(double %s, <4 x double> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfredusum.vs v10, v8, v10, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v10 ; CHECK-NEXT: ret @@ -256,9 +256,9 @@ define 
double @vpreduce_ord_fadd_v4f64(double %s, <4 x double> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_ord_fadd_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfredosum.vs v10, v8, v10, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v10 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll @@ -7,7 +7,7 @@ define half @vreduce_fadd_v1f16(<1 x half>* %x, half %s) { ; CHECK-LABEL: vreduce_fadd_v1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfmv.f.s ft0, v8 ; CHECK-NEXT: fadd.h fa0, fa0, ft0 @@ -20,7 +20,7 @@ define half @vreduce_ord_fadd_v1f16(<1 x half>* %x, half %s) { ; CHECK-LABEL: vreduce_ord_fadd_v1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vfredosum.vs v8, v8, v9 @@ -36,7 +36,7 @@ define half @vreduce_fadd_v2f16(<2 x half>* %x, half %s) { ; CHECK-LABEL: vreduce_fadd_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vfredusum.vs v8, v8, v9 @@ -50,7 +50,7 @@ define half @vreduce_ord_fadd_v2f16(<2 x half>* %x, half %s) { ; CHECK-LABEL: vreduce_ord_fadd_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vfredosum.vs v8, v8, v9 @@ -66,7 +66,7 @@ define half @vreduce_fadd_v4f16(<4 x half>* %x, half %s) { ; CHECK-LABEL: vreduce_fadd_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vfredusum.vs v8, v8, v9 @@ -80,7 +80,7 @@ define half @vreduce_ord_fadd_v4f16(<4 x half>* %x, half %s) { ; CHECK-LABEL: vreduce_ord_fadd_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vfredosum.vs v8, v8, v9 @@ -96,7 +96,7 @@ define half @vreduce_fadd_v8f16(<8 x half>* %x, half %s) { ; CHECK-LABEL: vreduce_fadd_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vfredusum.vs v8, v8, v9 @@ -110,7 +110,7 @@ define half @vreduce_ord_fadd_v8f16(<8 x half>* %x, half %s) { ; CHECK-LABEL: vreduce_ord_fadd_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vfredosum.vs v8, v8, v9 @@ -126,7 +126,7 @@ define half @vreduce_fadd_v16f16(<16 x half>* %x, half %s) { ; CHECK-LABEL: vreduce_fadd_v16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: 
vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfmv.s.f v10, fa0 ; CHECK-NEXT: vfredusum.vs v8, v8, v10 @@ -140,7 +140,7 @@ define half @vreduce_ord_fadd_v16f16(<16 x half>* %x, half %s) { ; CHECK-LABEL: vreduce_ord_fadd_v16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfmv.s.f v10, fa0 ; CHECK-NEXT: vfredosum.vs v8, v8, v10 @@ -157,11 +157,11 @@ ; CHECK-LABEL: vreduce_fadd_v32f16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v12, fa0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v8, v12 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -174,11 +174,11 @@ ; CHECK-LABEL: vreduce_ord_fadd_v32f16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v12, fa0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v8, v12 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -193,11 +193,11 @@ ; CHECK-LABEL: vreduce_fadd_v64f16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v16, fa0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -210,11 +210,11 @@ ; CHECK-LABEL: vreduce_ord_fadd_v64f16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v16, fa0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -229,14 +229,14 @@ ; CHECK-LABEL: vreduce_fadd_v128f16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vfadd.vv v8, v8, v16 -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v16, fa0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -250,17 +250,17 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: addi a1, a0, 128 ; CHECK-NEXT: li a2, 64 -; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, 
ma ; CHECK-NEXT: vle16.v v8, (a1) ; CHECK-NEXT: vle16.v v16, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v24, fa0 -; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, ma ; CHECK-NEXT: vfredosum.vs v16, v16, v24 ; CHECK-NEXT: vfmv.f.s ft0, v16 -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v16, ft0 -; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -274,7 +274,7 @@ define float @vreduce_fadd_v1f32(<1 x float>* %x, float %s) { ; CHECK-LABEL: vreduce_fadd_v1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfmv.f.s ft0, v8 ; CHECK-NEXT: fadd.s fa0, fa0, ft0 @@ -287,7 +287,7 @@ define float @vreduce_ord_fadd_v1f32(<1 x float>* %x, float %s) { ; CHECK-LABEL: vreduce_ord_fadd_v1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vfredosum.vs v8, v8, v9 @@ -301,10 +301,10 @@ define float @vreduce_fwadd_v1f32(<1 x half>* %x, float %s) { ; CHECK-LABEL: vreduce_fwadd_v1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfwcvt.f.f.v v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfmv.f.s ft0, v9 ; CHECK-NEXT: fadd.s fa0, fa0, ft0 ; CHECK-NEXT: ret @@ -317,13 +317,13 @@ define float @vreduce_ord_fwadd_v1f32(<1 x half>* %x, float %s) { ; CHECK-LABEL: vreduce_ord_fwadd_v1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v8, v9 -; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %v = load <1 x half>, <1 x half>* %x @@ -337,7 +337,7 @@ define float @vreduce_fadd_v2f32(<2 x float>* %x, float %s) { ; CHECK-LABEL: vreduce_fadd_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vfredusum.vs v8, v8, v9 @@ -351,7 +351,7 @@ define float @vreduce_ord_fadd_v2f32(<2 x float>* %x, float %s) { ; CHECK-LABEL: vreduce_ord_fadd_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vfredosum.vs v8, v8, v9 @@ -365,13 +365,13 @@ define float @vreduce_fwadd_v2f32(<2 x half>* %x, float %s) { ; CHECK-LABEL: vreduce_fwadd_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 
1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vfwredusum.vs v8, v8, v9 -; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %v = load <2 x half>, <2 x half>* %x @@ -383,13 +383,13 @@ define float @vreduce_ord_fwadd_v2f32(<2 x half>* %x, float %s) { ; CHECK-LABEL: vreduce_ord_fwadd_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v8, v9 -; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %v = load <2 x half>, <2 x half>* %x @@ -403,7 +403,7 @@ define float @vreduce_fadd_v4f32(<4 x float>* %x, float %s) { ; CHECK-LABEL: vreduce_fadd_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vfredusum.vs v8, v8, v9 @@ -417,7 +417,7 @@ define float @vreduce_ord_fadd_v4f32(<4 x float>* %x, float %s) { ; CHECK-LABEL: vreduce_ord_fadd_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vfredosum.vs v8, v8, v9 @@ -431,12 +431,12 @@ define float @vreduce_fwadd_v4f32(<4 x half>* %x, float %s) { ; CHECK-LABEL: vreduce_fwadd_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfwredusum.vs v8, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %v = load <4 x half>, <4 x half>* %x @@ -448,12 +448,12 @@ define float @vreduce_ord_fwadd_v4f32(<4 x half>* %x, float %s) { ; CHECK-LABEL: vreduce_ord_fwadd_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %v = load <4 x half>, <4 x half>* %x @@ -467,7 +467,7 @@ define float @vreduce_fadd_v8f32(<8 x float>* %x, float %s) { ; CHECK-LABEL: vreduce_fadd_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfmv.s.f v10, fa0 ; CHECK-NEXT: vfredusum.vs v8, v8, v10 @@ -481,7 +481,7 @@ define float @vreduce_ord_fadd_v8f32(<8 x float>* %x, float %s) { ; CHECK-LABEL: vreduce_ord_fadd_v8f32: ; 
CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfmv.s.f v10, fa0 ; CHECK-NEXT: vfredosum.vs v8, v8, v10 @@ -495,13 +495,13 @@ define float @vreduce_fwadd_v8f32(<8 x half>* %x, float %s) { ; CHECK-LABEL: vreduce_fwadd_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vfwredusum.vs v8, v8, v9 -; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %v = load <8 x half>, <8 x half>* %x @@ -513,13 +513,13 @@ define float @vreduce_ord_fwadd_v8f32(<8 x half>* %x, float %s) { ; CHECK-LABEL: vreduce_ord_fwadd_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v8, v9 -; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %v = load <8 x half>, <8 x half>* %x @@ -533,7 +533,7 @@ define float @vreduce_fadd_v16f32(<16 x float>* %x, float %s) { ; CHECK-LABEL: vreduce_fadd_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfmv.s.f v12, fa0 ; CHECK-NEXT: vfredusum.vs v8, v8, v12 @@ -547,7 +547,7 @@ define float @vreduce_ord_fadd_v16f32(<16 x float>* %x, float %s) { ; CHECK-LABEL: vreduce_ord_fadd_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfmv.s.f v12, fa0 ; CHECK-NEXT: vfredosum.vs v8, v8, v12 @@ -561,13 +561,13 @@ define float @vreduce_fwadd_v16f32(<16 x half>* %x, float %s) { ; CHECK-LABEL: vreduce_fwadd_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vfwredusum.vs v8, v8, v10 -; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %v = load <16 x half>, <16 x half>* %x @@ -579,13 +579,13 @@ define float @vreduce_ord_fwadd_v16f32(<16 x half>* %x, float %s) { ; CHECK-LABEL: vreduce_ord_fwadd_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: 
vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v8, v10 -; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %v = load <16 x half>, <16 x half>* %x @@ -600,11 +600,11 @@ ; CHECK-LABEL: vreduce_fadd_v32f32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v16, fa0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -617,11 +617,11 @@ ; CHECK-LABEL: vreduce_ord_fadd_v32f32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v16, fa0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -634,13 +634,13 @@ ; CHECK-LABEL: vreduce_fwadd_v32f32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v12, fa0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vfwredusum.vs v8, v8, v12 -; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %v = load <32 x half>, <32 x half>* %x @@ -653,13 +653,13 @@ ; CHECK-LABEL: vreduce_ord_fwadd_v32f32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v12, fa0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v8, v12 -; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %v = load <32 x half>, <32 x half>* %x @@ -674,14 +674,14 @@ ; CHECK-LABEL: vreduce_fadd_v64f32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vfadd.vv v8, v8, v16 -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v16, fa0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -695,17 +695,17 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: addi a1, a0, 128 ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, 
m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: vle32.v v16, (a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v24, fa0 -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; CHECK-NEXT: vfredosum.vs v16, v16, v24 ; CHECK-NEXT: vfmv.f.s ft0, v16 -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v16, ft0 -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -718,16 +718,16 @@ ; CHECK-LABEL: vreduce_fwadd_v64f32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v16, v8, a0 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfwadd.vv v24, v8, v16 -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v24, v8 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -741,22 +741,22 @@ ; CHECK-LABEL: vreduce_ord_fwadd_v64f32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v16, a0 -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v24, fa0 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfwredosum.vs v16, v16, v24 -; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; CHECK-NEXT: vfmv.f.s ft0, v16 -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v16, ft0 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v8, v16 -; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %v = load <64 x half>, <64 x half>* %x @@ -770,7 +770,7 @@ define double @vreduce_fadd_v1f64(<1 x double>* %x, double %s) { ; CHECK-LABEL: vreduce_fadd_v1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfmv.f.s ft0, v8 ; CHECK-NEXT: fadd.d fa0, fa0, ft0 @@ -783,7 +783,7 @@ define double @vreduce_ord_fadd_v1f64(<1 x double>* %x, double %s) { ; CHECK-LABEL: vreduce_ord_fadd_v1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vfredosum.vs v8, v8, v9 @@ -797,10 +797,10 @@ define double @vreduce_fwadd_v1f64(<1 x float>* %x, double %s) { ; 
CHECK-LABEL: vreduce_fwadd_v1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfwcvt.f.f.v v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; CHECK-NEXT: vfmv.f.s ft0, v9 ; CHECK-NEXT: fadd.d fa0, fa0, ft0 ; CHECK-NEXT: ret @@ -813,12 +813,12 @@ define double @vreduce_ord_fwadd_v1f64(<1 x float>* %x, double %s) { ; CHECK-LABEL: vreduce_ord_fwadd_v1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %v = load <1 x float>, <1 x float>* %x @@ -832,7 +832,7 @@ define double @vreduce_fadd_v2f64(<2 x double>* %x, double %s) { ; CHECK-LABEL: vreduce_fadd_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vfredusum.vs v8, v8, v9 @@ -846,7 +846,7 @@ define double @vreduce_ord_fadd_v2f64(<2 x double>* %x, double %s) { ; CHECK-LABEL: vreduce_ord_fadd_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vfredosum.vs v8, v8, v9 @@ -860,12 +860,12 @@ define double @vreduce_fwadd_v2f64(<2 x float>* %x, double %s) { ; CHECK-LABEL: vreduce_fwadd_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwredusum.vs v8, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %v = load <2 x float>, <2 x float>* %x @@ -877,12 +877,12 @@ define double @vreduce_ord_fwadd_v2f64(<2 x float>* %x, double %s) { ; CHECK-LABEL: vreduce_ord_fwadd_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %v = load <2 x float>, <2 x float>* %x @@ -896,7 +896,7 @@ define double @vreduce_fadd_v4f64(<4 x double>* %x, double %s) { ; CHECK-LABEL: vreduce_fadd_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfmv.s.f v10, fa0 ; CHECK-NEXT: vfredusum.vs v8, v8, v10 @@ -910,7 +910,7 @@ define double @vreduce_ord_fadd_v4f64(<4 x double>* %x, double %s) { ; CHECK-LABEL: vreduce_ord_fadd_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: 
vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfmv.s.f v10, fa0 ; CHECK-NEXT: vfredosum.vs v8, v8, v10 @@ -924,13 +924,13 @@ define double @vreduce_fwadd_v4f64(<4 x float>* %x, double %s) { ; CHECK-LABEL: vreduce_fwadd_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vfwredusum.vs v8, v8, v9 -; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %v = load <4 x float>, <4 x float>* %x @@ -942,13 +942,13 @@ define double @vreduce_ord_fwadd_v4f64(<4 x float>* %x, double %s) { ; CHECK-LABEL: vreduce_ord_fwadd_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v8, v9 -; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %v = load <4 x float>, <4 x float>* %x @@ -962,7 +962,7 @@ define double @vreduce_fadd_v8f64(<8 x double>* %x, double %s) { ; CHECK-LABEL: vreduce_fadd_v8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfmv.s.f v12, fa0 ; CHECK-NEXT: vfredusum.vs v8, v8, v12 @@ -976,7 +976,7 @@ define double @vreduce_ord_fadd_v8f64(<8 x double>* %x, double %s) { ; CHECK-LABEL: vreduce_ord_fadd_v8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfmv.s.f v12, fa0 ; CHECK-NEXT: vfredosum.vs v8, v8, v12 @@ -990,13 +990,13 @@ define double @vreduce_fwadd_v8f64(<8 x float>* %x, double %s) { ; CHECK-LABEL: vreduce_fwadd_v8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vfwredusum.vs v8, v8, v10 -; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %v = load <8 x float>, <8 x float>* %x @@ -1008,13 +1008,13 @@ define double @vreduce_ord_fwadd_v8f64(<8 x float>* %x, double %s) { ; CHECK-LABEL: vreduce_ord_fwadd_v8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v8, v10 -; 
CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %v = load <8 x float>, <8 x float>* %x @@ -1028,7 +1028,7 @@ define double @vreduce_fadd_v16f64(<16 x double>* %x, double %s) { ; CHECK-LABEL: vreduce_fadd_v16f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfmv.s.f v16, fa0 ; CHECK-NEXT: vfredusum.vs v8, v8, v16 @@ -1042,7 +1042,7 @@ define double @vreduce_ord_fadd_v16f64(<16 x double>* %x, double %s) { ; CHECK-LABEL: vreduce_ord_fadd_v16f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfmv.s.f v16, fa0 ; CHECK-NEXT: vfredosum.vs v8, v8, v16 @@ -1056,13 +1056,13 @@ define double @vreduce_fwadd_v16f64(<16 x float>* %x, double %s) { ; CHECK-LABEL: vreduce_fwadd_v16f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v12, fa0 -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vfwredusum.vs v8, v8, v12 -; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %v = load <16 x float>, <16 x float>* %x @@ -1074,13 +1074,13 @@ define double @vreduce_ord_fwadd_v16f64(<16 x float>* %x, double %s) { ; CHECK-LABEL: vreduce_ord_fwadd_v16f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v12, fa0 -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v8, v12 -; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %v = load <16 x float>, <16 x float>* %x @@ -1094,7 +1094,7 @@ define double @vreduce_fadd_v32f64(<32 x double>* %x, double %s) { ; CHECK-LABEL: vreduce_fadd_v32f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle64.v v16, (a0) @@ -1111,7 +1111,7 @@ define double @vreduce_ord_fadd_v32f64(<32 x double>* %x, double %s) { ; CHECK-LABEL: vreduce_ord_fadd_v32f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle64.v v16, (a0) @@ -1168,15 +1168,15 @@ ; CHECK-LABEL: vreduce_fwadd_v32f64: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v16, v8, 16 -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vfwadd.vv v24, v8, v16 -; 
CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v24, v8 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1190,21 +1190,21 @@ ; CHECK-LABEL: vreduce_ord_fwadd_v32f64: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v16, (a0) -; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v16, 16 -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v24, fa0 -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vfwredosum.vs v16, v16, v24 -; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; CHECK-NEXT: vfmv.f.s ft0, v16 -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v16, ft0 -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v8, v16 -; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %v = load <32 x float>, <32 x float>* %x @@ -1218,13 +1218,13 @@ define half @vreduce_fmin_v2f16(<2 x half>* %x) { ; CHECK-LABEL: vreduce_fmin_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: lui a0, %hi(.LCPI68_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI68_0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vlse16.v v9, (a0), zero -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1238,13 +1238,13 @@ define half @vreduce_fmin_v4f16(<4 x half>* %x) { ; CHECK-LABEL: vreduce_fmin_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: lui a0, %hi(.LCPI69_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI69_0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vlse16.v v9, (a0), zero -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1256,13 +1256,13 @@ define half @vreduce_fmin_v4f16_nonans(<4 x half>* %x) { ; CHECK-LABEL: vreduce_fmin_v4f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: lui a0, %hi(.LCPI70_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI70_0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vlse16.v v9, (a0), zero -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1274,13 
+1274,13 @@ define half @vreduce_fmin_v4f16_nonans_noinfs(<4 x half>* %x) { ; CHECK-LABEL: vreduce_fmin_v4f16_nonans_noinfs: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: lui a0, %hi(.LCPI71_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI71_0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vlse16.v v9, (a0), zero -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1295,16 +1295,16 @@ ; CHECK-LABEL: vreduce_fmin_v128f16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vfmin.vv v8, v8, v16 ; CHECK-NEXT: lui a0, %hi(.LCPI72_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI72_0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vlse16.v v16, (a0), zero -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1318,13 +1318,13 @@ define float @vreduce_fmin_v2f32(<2 x float>* %x) { ; CHECK-LABEL: vreduce_fmin_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: lui a0, %hi(.LCPI73_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI73_0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vlse32.v v9, (a0), zero -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1338,13 +1338,13 @@ define float @vreduce_fmin_v4f32(<4 x float>* %x) { ; CHECK-LABEL: vreduce_fmin_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: lui a0, %hi(.LCPI74_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI74_0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vlse32.v v9, (a0), zero -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1356,13 +1356,13 @@ define float @vreduce_fmin_v4f32_nonans(<4 x float>* %x) { ; CHECK-LABEL: vreduce_fmin_v4f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: lui a0, %hi(.LCPI75_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI75_0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vlse32.v v9, (a0), zero -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1374,13 +1374,13 @@ define float @vreduce_fmin_v4f32_nonans_noinfs(<4 x float>* %x) { ; CHECK-LABEL: vreduce_fmin_v4f32_nonans_noinfs: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: lui a0, %hi(.LCPI76_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI76_0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vlse32.v v9, (a0), zero -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1395,7 +1395,7 @@ ; CHECK-LABEL: vreduce_fmin_v128f32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: addi a2, a0, 384 ; CHECK-NEXT: vle32.v v16, (a2) @@ -1408,9 +1408,9 @@ ; CHECK-NEXT: vfmin.vv v8, v8, v16 ; CHECK-NEXT: lui a0, %hi(.LCPI77_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI77_0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vlse32.v v16, (a0), zero -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1424,13 +1424,13 @@ define double @vreduce_fmin_v2f64(<2 x double>* %x) { ; CHECK-LABEL: vreduce_fmin_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: lui a0, %hi(.LCPI78_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI78_0) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1444,13 +1444,13 @@ define double @vreduce_fmin_v4f64(<4 x double>* %x) { ; CHECK-LABEL: vreduce_fmin_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: lui a0, %hi(.LCPI79_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI79_0) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v8, v10 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1462,13 +1462,13 @@ define double @vreduce_fmin_v4f64_nonans(<4 x double>* %x) { ; CHECK-LABEL: vreduce_fmin_v4f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: lui a0, %hi(.LCPI80_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI80_0) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v8, v10 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1480,13 +1480,13 @@ define double @vreduce_fmin_v4f64_nonans_noinfs(<4 x double>* %x) { ; CHECK-LABEL: vreduce_fmin_v4f64_nonans_noinfs: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vle64.v v8, 
(a0) ; CHECK-NEXT: lui a0, %hi(.LCPI81_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI81_0) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v8, v10 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1500,16 +1500,16 @@ define double @vreduce_fmin_v32f64(<32 x double>* %x) { ; CHECK-LABEL: vreduce_fmin_v32f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle64.v v16, (a0) ; CHECK-NEXT: vfmin.vv v8, v8, v16 ; CHECK-NEXT: lui a0, %hi(.LCPI82_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI82_0) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1523,13 +1523,13 @@ define half @vreduce_fmax_v2f16(<2 x half>* %x) { ; CHECK-LABEL: vreduce_fmax_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: lui a0, %hi(.LCPI83_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI83_0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vlse16.v v9, (a0), zero -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1543,13 +1543,13 @@ define half @vreduce_fmax_v4f16(<4 x half>* %x) { ; CHECK-LABEL: vreduce_fmax_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: lui a0, %hi(.LCPI84_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI84_0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vlse16.v v9, (a0), zero -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1561,13 +1561,13 @@ define half @vreduce_fmax_v4f16_nonans(<4 x half>* %x) { ; CHECK-LABEL: vreduce_fmax_v4f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: lui a0, %hi(.LCPI85_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI85_0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vlse16.v v9, (a0), zero -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1579,13 +1579,13 @@ define half @vreduce_fmax_v4f16_nonans_noinfs(<4 x half>* %x) { ; CHECK-LABEL: vreduce_fmax_v4f16_nonans_noinfs: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: lui a0, %hi(.LCPI86_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI86_0) -; CHECK-NEXT: vsetivli 
zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vlse16.v v9, (a0), zero -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1600,16 +1600,16 @@ ; CHECK-LABEL: vreduce_fmax_v128f16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vfmax.vv v8, v8, v16 ; CHECK-NEXT: lui a0, %hi(.LCPI87_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI87_0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vlse16.v v16, (a0), zero -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vfredmax.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1623,13 +1623,13 @@ define float @vreduce_fmax_v2f32(<2 x float>* %x) { ; CHECK-LABEL: vreduce_fmax_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: lui a0, %hi(.LCPI88_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI88_0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vlse32.v v9, (a0), zero -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1643,13 +1643,13 @@ define float @vreduce_fmax_v4f32(<4 x float>* %x) { ; CHECK-LABEL: vreduce_fmax_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: lui a0, %hi(.LCPI89_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI89_0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vlse32.v v9, (a0), zero -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1661,13 +1661,13 @@ define float @vreduce_fmax_v4f32_nonans(<4 x float>* %x) { ; CHECK-LABEL: vreduce_fmax_v4f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: lui a0, %hi(.LCPI90_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI90_0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vlse32.v v9, (a0), zero -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1679,13 +1679,13 @@ define float @vreduce_fmax_v4f32_nonans_noinfs(<4 x float>* %x) { ; CHECK-LABEL: vreduce_fmax_v4f32_nonans_noinfs: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: lui a0, %hi(.LCPI91_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI91_0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vlse32.v v9, (a0), zero -; CHECK-NEXT: vsetivli zero, 4, 
e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1700,7 +1700,7 @@ ; CHECK-LABEL: vreduce_fmax_v128f32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: addi a2, a0, 384 ; CHECK-NEXT: vle32.v v16, (a2) @@ -1713,9 +1713,9 @@ ; CHECK-NEXT: vfmax.vv v8, v8, v16 ; CHECK-NEXT: lui a0, %hi(.LCPI92_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI92_0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vlse32.v v16, (a0), zero -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vfredmax.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1729,13 +1729,13 @@ define double @vreduce_fmax_v2f64(<2 x double>* %x) { ; CHECK-LABEL: vreduce_fmax_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: lui a0, %hi(.LCPI93_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI93_0) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1749,13 +1749,13 @@ define double @vreduce_fmax_v4f64(<4 x double>* %x) { ; CHECK-LABEL: vreduce_fmax_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: lui a0, %hi(.LCPI94_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI94_0) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vfredmax.vs v8, v8, v10 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1767,13 +1767,13 @@ define double @vreduce_fmax_v4f64_nonans(<4 x double>* %x) { ; CHECK-LABEL: vreduce_fmax_v4f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: lui a0, %hi(.LCPI95_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI95_0) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vfredmax.vs v8, v8, v10 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1785,13 +1785,13 @@ define double @vreduce_fmax_v4f64_nonans_noinfs(<4 x double>* %x) { ; CHECK-LABEL: vreduce_fmax_v4f64_nonans_noinfs: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: lui a0, %hi(.LCPI96_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI96_0) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vfredmax.vs v8, v8, v10 ; CHECK-NEXT: 
vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1805,16 +1805,16 @@ define double @vreduce_fmax_v32f64(<32 x double>* %x) { ; CHECK-LABEL: vreduce_fmax_v32f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle64.v v16, (a0) ; CHECK-NEXT: vfmax.vv v8, v8, v16 ; CHECK-NEXT: lui a0, %hi(.LCPI97_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI97_0) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vfredmax.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1826,7 +1826,7 @@ define float @vreduce_nsz_fadd_v4f32(<4 x float>* %x, float %s) { ; CHECK-LABEL: vreduce_nsz_fadd_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: vfredusum.vs v8, v8, v9 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll @@ -9,9 +9,9 @@ define signext i8 @vpreduce_add_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -25,9 +25,9 @@ ; CHECK-LABEL: vpreduce_umax_v2i8: ; CHECK: # %bb.0: ; CHECK-NEXT: andi a0, a0, 255 -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -40,9 +40,9 @@ define signext i8 @vpreduce_smax_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -56,9 +56,9 @@ ; CHECK-LABEL: vpreduce_umin_v2i8: ; CHECK: # %bb.0: ; CHECK-NEXT: andi a0, a0, 255 -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vredminu.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -71,9 +71,9 @@ define signext i8 @vpreduce_smin_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu 
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -86,9 +86,9 @@ define signext i8 @vpreduce_and_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -101,9 +101,9 @@ define signext i8 @vpreduce_or_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -116,9 +116,9 @@ define signext i8 @vpreduce_xor_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -132,9 +132,9 @@ ; CHECK-LABEL: vpreduce_umin_v3i8: ; CHECK: # %bb.0: ; CHECK-NEXT: andi a0, a0, 255 -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma ; CHECK-NEXT: vredminu.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -147,9 +147,9 @@ define signext i8 @vpreduce_add_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -163,9 +163,9 @@ ; CHECK-LABEL: vpreduce_umax_v4i8: ; CHECK: # %bb.0: ; CHECK-NEXT: andi a0, a0, 255 -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma ; CHECK-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -178,9 +178,9 @@ define signext i8 @vpreduce_smax_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -194,9 +194,9 @@ ; CHECK-LABEL: vpreduce_umin_v4i8: ; CHECK: # %bb.0: ; CHECK-NEXT: andi a0, a0, 255 -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli 
zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma ; CHECK-NEXT: vredminu.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -209,9 +209,9 @@ define signext i8 @vpreduce_smin_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -224,9 +224,9 @@ define signext i8 @vpreduce_and_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -239,9 +239,9 @@ define signext i8 @vpreduce_or_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -254,9 +254,9 @@ define signext i8 @vpreduce_xor_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -269,9 +269,9 @@ define signext i16 @vpreduce_add_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -286,9 +286,9 @@ ; RV32: # %bb.0: ; RV32-NEXT: slli a0, a0, 16 ; RV32-NEXT: srli a0, a0, 16 -; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, a0 -; RV32-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; RV32-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: ret @@ -297,9 +297,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: slli a0, a0, 48 ; RV64-NEXT: srli a0, a0, 48 -; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; RV64-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -312,9 +312,9 @@ define signext i16 @vpreduce_smax_v2i16(i16 signext %s, <2 x i16> 
%v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -329,9 +329,9 @@ ; RV32: # %bb.0: ; RV32-NEXT: slli a0, a0, 16 ; RV32-NEXT: srli a0, a0, 16 -; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, a0 -; RV32-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; RV32-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: ret @@ -340,9 +340,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: slli a0, a0, 48 ; RV64-NEXT: srli a0, a0, 48 -; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; RV64-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -355,9 +355,9 @@ define signext i16 @vpreduce_smin_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -370,9 +370,9 @@ define signext i16 @vpreduce_and_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -385,9 +385,9 @@ define signext i16 @vpreduce_or_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -400,9 +400,9 @@ define signext i16 @vpreduce_xor_v2i16(i16 signext %s, <2 x i16> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -415,9 +415,9 @@ define signext i16 @vpreduce_add_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli 
zero, a1, e16, mf2, tu, ma ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -432,9 +432,9 @@ ; RV32: # %bb.0: ; RV32-NEXT: slli a0, a0, 16 ; RV32-NEXT: srli a0, a0, 16 -; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, a0 -; RV32-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; RV32-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: ret @@ -443,9 +443,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: slli a0, a0, 48 ; RV64-NEXT: srli a0, a0, 48 -; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; RV64-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -458,9 +458,9 @@ define signext i16 @vpreduce_smax_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -475,9 +475,9 @@ ; RV32: # %bb.0: ; RV32-NEXT: slli a0, a0, 16 ; RV32-NEXT: srli a0, a0, 16 -; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, a0 -; RV32-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; RV32-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: ret @@ -486,9 +486,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: slli a0, a0, 48 ; RV64-NEXT: srli a0, a0, 48 -; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; RV64-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -501,9 +501,9 @@ define signext i16 @vpreduce_smin_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -516,9 +516,9 @@ define signext i16 @vpreduce_and_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -531,9 +531,9 @@ define signext i16 @vpreduce_or_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, 
tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -546,9 +546,9 @@ define signext i16 @vpreduce_xor_v4i16(i16 signext %s, <4 x i16> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -561,9 +561,9 @@ define signext i32 @vpreduce_add_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -576,9 +576,9 @@ define signext i32 @vpreduce_umax_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_umax_v2i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, a0 -; RV32-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; RV32-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: ret @@ -587,9 +587,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: srli a0, a0, 32 -; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; RV64-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -602,9 +602,9 @@ define signext i32 @vpreduce_smax_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -617,9 +617,9 @@ define signext i32 @vpreduce_umin_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_umin_v2i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, a0 -; RV32-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; RV32-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: ret @@ -628,9 +628,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: srli a0, a0, 32 -; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; RV64-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -643,9 +643,9 @@ define signext i32 @vpreduce_smin_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v2i32: 
; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -658,9 +658,9 @@ define signext i32 @vpreduce_and_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -673,9 +673,9 @@ define signext i32 @vpreduce_or_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -688,9 +688,9 @@ define signext i32 @vpreduce_xor_v2i32(i32 signext %s, <2 x i32> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -703,9 +703,9 @@ define signext i32 @vpreduce_add_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -718,9 +718,9 @@ define signext i32 @vpreduce_umax_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_umax_v4i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, a0 -; RV32-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; RV32-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: ret @@ -729,9 +729,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: srli a0, a0, 32 -; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; RV64-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -744,9 +744,9 @@ define signext i32 @vpreduce_smax_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli 
zero, a1, e32, m1, tu, ma ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -759,9 +759,9 @@ define signext i32 @vpreduce_umin_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_umin_v4i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, a0 -; RV32-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; RV32-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: ret @@ -770,9 +770,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: srli a0, a0, 32 -; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; RV64-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -785,9 +785,9 @@ define signext i32 @vpreduce_smin_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -800,9 +800,9 @@ define signext i32 @vpreduce_and_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -815,9 +815,9 @@ define signext i32 @vpreduce_or_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -830,9 +830,9 @@ define signext i32 @vpreduce_xor_v4i32(i32 signext %s, <4 x i32> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -851,21 +851,21 @@ ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a2, a3 ; CHECK-NEXT: .LBB49_2: -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; CHECK-NEXT: li a3, 32 ; CHECK-NEXT: vslidedown.vi v24, v0, 4 ; CHECK-NEXT: bltu a1, a3, .LBB49_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: .LBB49_4: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v25, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, ma ; CHECK-NEXT: 
vredxor.vs v25, v8, v25, v0.t ; CHECK-NEXT: vmv.x.s a0, v25 -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v8, a0 -; CHECK-NEXT: vsetvli zero, a2, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m8, tu, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vredxor.vs v8, v16, v8, v0.t ; CHECK-NEXT: vmv.x.s a0, v8 @@ -884,13 +884,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, ma ; RV32-NEXT: vredsum.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -898,9 +898,9 @@ ; ; RV64-LABEL: vpreduce_add_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma ; RV64-NEXT: vredsum.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -918,13 +918,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, ma ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -932,9 +932,9 @@ ; ; RV64-LABEL: vpreduce_umax_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -952,13 +952,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, ma ; RV32-NEXT: vredmax.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -966,9 +966,9 @@ ; ; RV64-LABEL: vpreduce_smax_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma ; RV64-NEXT: vredmax.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -986,13 +986,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma 
; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, ma ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1000,9 +1000,9 @@ ; ; RV64-LABEL: vpreduce_umin_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -1020,13 +1020,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, ma ; RV32-NEXT: vredmin.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1034,9 +1034,9 @@ ; ; RV64-LABEL: vpreduce_smin_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma ; RV64-NEXT: vredmin.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -1054,13 +1054,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, ma ; RV32-NEXT: vredand.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1068,9 +1068,9 @@ ; ; RV64-LABEL: vpreduce_and_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma ; RV64-NEXT: vredand.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -1088,13 +1088,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, ma ; RV32-NEXT: vredor.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1102,9 +1102,9 @@ ; ; RV64-LABEL: vpreduce_or_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, 
ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma ; RV64-NEXT: vredor.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -1122,13 +1122,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, ma ; RV32-NEXT: vredxor.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1136,9 +1136,9 @@ ; ; RV64-LABEL: vpreduce_xor_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma ; RV64-NEXT: vredxor.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -1156,13 +1156,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, ma ; RV32-NEXT: vredsum.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1170,9 +1170,9 @@ ; ; RV64-LABEL: vpreduce_add_v4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, ma ; RV64-NEXT: vredsum.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 ; RV64-NEXT: ret @@ -1190,13 +1190,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, ma ; RV32-NEXT: vredmaxu.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1204,9 +1204,9 @@ ; ; RV64-LABEL: vpreduce_umax_v4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, ma ; RV64-NEXT: vredmaxu.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 ; RV64-NEXT: ret @@ -1224,13 +1224,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, ma ; 
RV32-NEXT: vredmax.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1238,9 +1238,9 @@ ; ; RV64-LABEL: vpreduce_smax_v4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, ma ; RV64-NEXT: vredmax.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 ; RV64-NEXT: ret @@ -1258,13 +1258,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, ma ; RV32-NEXT: vredminu.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1272,9 +1272,9 @@ ; ; RV64-LABEL: vpreduce_umin_v4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, ma ; RV64-NEXT: vredminu.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 ; RV64-NEXT: ret @@ -1292,13 +1292,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, ma ; RV32-NEXT: vredmin.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1306,9 +1306,9 @@ ; ; RV64-LABEL: vpreduce_smin_v4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, ma ; RV64-NEXT: vredmin.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 ; RV64-NEXT: ret @@ -1326,13 +1326,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, ma ; RV32-NEXT: vredand.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1340,9 +1340,9 @@ ; ; RV64-LABEL: vpreduce_and_v4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; RV64-NEXT: 
vsetvli zero, a1, e64, m2, tu, ma ; RV64-NEXT: vredand.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 ; RV64-NEXT: ret @@ -1360,13 +1360,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, ma ; RV32-NEXT: vredor.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1374,9 +1374,9 @@ ; ; RV64-LABEL: vpreduce_or_v4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, ma ; RV64-NEXT: vredor.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 ; RV64-NEXT: ret @@ -1394,13 +1394,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, ma ; RV32-NEXT: vredxor.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1408,9 +1408,9 @@ ; ; RV64-LABEL: vpreduce_xor_v4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, ma ; RV64-NEXT: vredxor.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 ; RV64-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll @@ -7,7 +7,7 @@ define i8 @vreduce_add_v1i8(<1 x i8>* %x) { ; CHECK-LABEL: vreduce_add_v1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -21,7 +21,7 @@ define i8 @vreduce_add_v2i8(<2 x i8>* %x) { ; CHECK-LABEL: vreduce_add_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredsum.vs v8, v8, v9 @@ -37,7 +37,7 @@ define i8 @vreduce_add_v4i8(<4 x i8>* %x) { ; CHECK-LABEL: vreduce_add_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredsum.vs v8, v8, v9 @@ -53,7 +53,7 @@ define i8 @vreduce_add_v8i8(<8 x i8>* %x) { ; CHECK-LABEL: vreduce_add_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, 
zero ; CHECK-NEXT: vredsum.vs v8, v8, v9 @@ -69,7 +69,7 @@ define i8 @vreduce_add_v16i8(<16 x i8>* %x) { ; CHECK-LABEL: vreduce_add_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredsum.vs v8, v8, v9 @@ -86,11 +86,11 @@ ; CHECK-LABEL: vreduce_add_v32i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v10, zero -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vredsum.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -105,11 +105,11 @@ ; CHECK-LABEL: vreduce_add_v64i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v12, zero -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vredsum.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -124,11 +124,11 @@ ; CHECK-LABEL: vreduce_add_v128i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 128 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v16, zero -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vredsum.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -143,14 +143,14 @@ ; CHECK-LABEL: vreduce_add_v256i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 128 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vadd.vv v8, v8, v16 -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v16, zero -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vredsum.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -164,7 +164,7 @@ define i16 @vreduce_add_v1i16(<1 x i16>* %x) { ; CHECK-LABEL: vreduce_add_v1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -176,7 +176,7 @@ define i16 @vwreduce_add_v1i16(<1 x i8>* %x) { ; CHECK-LABEL: vwreduce_add_v1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vsext.vf2 v9, v8 ; CHECK-NEXT: vmv.x.s a0, v9 @@ -190,7 +190,7 @@ define i16 @vwreduce_uadd_v1i16(<1 x i8>* %x) { ; CHECK-LABEL: vwreduce_uadd_v1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vzext.vf2 v9, v8 ; CHECK-NEXT: vmv.x.s a0, v9 @@ -206,7 +206,7 @@ define i16 @vreduce_add_v2i16(<2 x 
i16>* %x) { ; CHECK-LABEL: vreduce_add_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredsum.vs v8, v8, v9 @@ -220,13 +220,13 @@ define i16 @vwreduce_add_v2i16(<2 x i8>* %x) { ; CHECK-LABEL: vwreduce_add_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vwredsum.vs v8, v8, v9 -; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <2 x i8>, <2 x i8>* %x @@ -238,13 +238,13 @@ define i16 @vwreduce_uadd_v2i16(<2 x i8>* %x) { ; CHECK-LABEL: vwreduce_uadd_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vwredsumu.vs v8, v8, v9 -; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <2 x i8>, <2 x i8>* %x @@ -258,7 +258,7 @@ define i16 @vreduce_add_v4i16(<4 x i16>* %x) { ; CHECK-LABEL: vreduce_add_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredsum.vs v8, v8, v9 @@ -272,13 +272,13 @@ define i16 @vwreduce_add_v4i16(<4 x i8>* %x) { ; CHECK-LABEL: vwreduce_add_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vwredsum.vs v8, v8, v9 -; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <4 x i8>, <4 x i8>* %x @@ -290,13 +290,13 @@ define i16 @vwreduce_uadd_v4i16(<4 x i8>* %x) { ; CHECK-LABEL: vwreduce_uadd_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vwredsumu.vs v8, v8, v9 -; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <4 x i8>, <4 x i8>* %x @@ -310,7 +310,7 @@ define i16 @vreduce_add_v8i16(<8 x i16>* %x) { ; CHECK-LABEL: vreduce_add_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, 
e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredsum.vs v8, v8, v9 @@ -324,12 +324,12 @@ define i16 @vwreduce_add_v8i16(<8 x i8>* %x) { ; CHECK-LABEL: vwreduce_add_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; CHECK-NEXT: vwredsum.vs v8, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <8 x i8>, <8 x i8>* %x @@ -341,12 +341,12 @@ define i16 @vwreduce_uadd_v8i16(<8 x i8>* %x) { ; CHECK-LABEL: vwreduce_uadd_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; CHECK-NEXT: vwredsumu.vs v8, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <8 x i8>, <8 x i8>* %x @@ -360,7 +360,7 @@ define i16 @vreduce_add_v16i16(<16 x i16>* %x) { ; CHECK-LABEL: vreduce_add_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.s.x v10, zero ; CHECK-NEXT: vredsum.vs v8, v8, v10 @@ -374,13 +374,13 @@ define i16 @vwreduce_add_v16i16(<16 x i8>* %x) { ; CHECK-LABEL: vwreduce_add_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vwredsum.vs v8, v8, v9 -; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <16 x i8>, <16 x i8>* %x @@ -392,13 +392,13 @@ define i16 @vwreduce_uadd_v16i16(<16 x i8>* %x) { ; CHECK-LABEL: vwreduce_uadd_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vwredsumu.vs v8, v8, v9 -; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <16 x i8>, <16 x i8>* %x @@ -413,11 +413,11 @@ ; CHECK-LABEL: vreduce_add_v32i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v12, zero -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vredsum.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s 
a0, v8 ; CHECK-NEXT: ret @@ -430,13 +430,13 @@ ; CHECK-LABEL: vwreduce_add_v32i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v10, zero -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vwredsum.vs v8, v8, v10 -; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <32 x i8>, <32 x i8>* %x @@ -449,13 +449,13 @@ ; CHECK-LABEL: vwreduce_uadd_v32i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v10, zero -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vwredsumu.vs v8, v8, v10 -; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <32 x i8>, <32 x i8>* %x @@ -470,11 +470,11 @@ ; CHECK-LABEL: vreduce_add_v64i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v16, zero -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vredsum.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -487,13 +487,13 @@ ; CHECK-LABEL: vwreduce_add_v64i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v12, zero -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vwredsum.vs v8, v8, v12 -; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <64 x i8>, <64 x i8>* %x @@ -506,13 +506,13 @@ ; CHECK-LABEL: vwreduce_uadd_v64i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v12, zero -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vwredsumu.vs v8, v8, v12 -; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <64 x i8>, <64 x i8>* %x @@ -527,14 +527,14 @@ ; CHECK-LABEL: vreduce_add_v128i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: 
vadd.vv v8, v8, v16 -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v16, zero -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vredsum.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -547,16 +547,16 @@ ; CHECK-LABEL: vwreduce_add_v128i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 128 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, 64 -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v16, v8, a0 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwadd.vv v24, v8, v16 -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v8, zero -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vredsum.vs v8, v24, v8 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -570,16 +570,16 @@ ; CHECK-LABEL: vwreduce_uadd_v128i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 128 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, 64 -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v16, v8, a0 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwaddu.vv v24, v8, v16 -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v8, zero -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vredsum.vs v8, v24, v8 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -594,7 +594,7 @@ define i32 @vreduce_add_v1i32(<1 x i32>* %x) { ; CHECK-LABEL: vreduce_add_v1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -606,7 +606,7 @@ define i32 @vwreduce_add_v1i32(<1 x i16>* %x) { ; CHECK-LABEL: vwreduce_add_v1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsext.vf2 v9, v8 ; CHECK-NEXT: vmv.x.s a0, v9 @@ -620,7 +620,7 @@ define i32 @vwreduce_uadd_v1i32(<1 x i16>* %x) { ; CHECK-LABEL: vwreduce_uadd_v1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vzext.vf2 v9, v8 ; CHECK-NEXT: vmv.x.s a0, v9 @@ -636,7 +636,7 @@ define i32 @vreduce_add_v2i32(<2 x i32>* %x) { ; CHECK-LABEL: vreduce_add_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredsum.vs v8, v8, v9 @@ -650,13 +650,13 @@ define i32 @vwreduce_add_v2i32(<2 x i16>* %x) { ; CHECK-LABEL: vwreduce_add_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: 
vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vwredsum.vs v8, v8, v9 -; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <2 x i16>, <2 x i16>* %x @@ -668,13 +668,13 @@ define i32 @vwreduce_uadd_v2i32(<2 x i16>* %x) { ; CHECK-LABEL: vwreduce_uadd_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vwredsumu.vs v8, v8, v9 -; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <2 x i16>, <2 x i16>* %x @@ -688,7 +688,7 @@ define i32 @vreduce_add_v4i32(<4 x i32>* %x) { ; CHECK-LABEL: vreduce_add_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredsum.vs v8, v8, v9 @@ -702,12 +702,12 @@ define i32 @vwreduce_add_v4i32(<4 x i16>* %x) { ; CHECK-LABEL: vwreduce_add_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vwredsum.vs v8, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <4 x i16>, <4 x i16>* %x @@ -719,12 +719,12 @@ define i32 @vwreduce_uadd_v4i32(<4 x i16>* %x) { ; CHECK-LABEL: vwreduce_uadd_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vwredsumu.vs v8, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <4 x i16>, <4 x i16>* %x @@ -738,7 +738,7 @@ define i32 @vreduce_add_v8i32(<8 x i32>* %x) { ; CHECK-LABEL: vreduce_add_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.s.x v10, zero ; CHECK-NEXT: vredsum.vs v8, v8, v10 @@ -752,13 +752,13 @@ define i32 @vwreduce_add_v8i32(<8 x i16>* %x) { ; CHECK-LABEL: vwreduce_add_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vwredsum.vs v8, v8, v9 -; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; 
CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <8 x i16>, <8 x i16>* %x @@ -770,13 +770,13 @@ define i32 @vwreduce_uadd_v8i32(<8 x i16>* %x) { ; CHECK-LABEL: vwreduce_uadd_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vwredsumu.vs v8, v8, v9 -; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <8 x i16>, <8 x i16>* %x @@ -790,7 +790,7 @@ define i32 @vreduce_add_v16i32(<16 x i32>* %x) { ; CHECK-LABEL: vreduce_add_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.s.x v12, zero ; CHECK-NEXT: vredsum.vs v8, v8, v12 @@ -804,13 +804,13 @@ define i32 @vwreduce_add_v16i32(<16 x i16>* %x) { ; CHECK-LABEL: vwreduce_add_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v10, zero -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vwredsum.vs v8, v8, v10 -; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <16 x i16>, <16 x i16>* %x @@ -822,13 +822,13 @@ define i32 @vwreduce_uadd_v16i32(<16 x i16>* %x) { ; CHECK-LABEL: vwreduce_uadd_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v10, zero -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vwredsumu.vs v8, v8, v10 -; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <16 x i16>, <16 x i16>* %x @@ -843,11 +843,11 @@ ; CHECK-LABEL: vreduce_add_v32i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v16, zero -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vredsum.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -860,13 +860,13 @@ ; CHECK-LABEL: vwreduce_add_v32i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v12, zero -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vwredsum.vs v8, v8, v12 
-; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <32 x i16>, <32 x i16>* %x @@ -879,13 +879,13 @@ ; CHECK-LABEL: vwreduce_uadd_v32i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v12, zero -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vwredsumu.vs v8, v8, v12 -; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %v = load <32 x i16>, <32 x i16>* %x @@ -900,14 +900,14 @@ ; CHECK-LABEL: vreduce_add_v64i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vadd.vv v8, v8, v16 -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v16, zero -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vredsum.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -920,16 +920,16 @@ ; CHECK-LABEL: vwreduce_add_v64i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v16, v8, a0 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwadd.vv v24, v8, v16 -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v8, zero -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vredsum.vs v8, v24, v8 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -943,16 +943,16 @@ ; CHECK-LABEL: vwreduce_uadd_v64i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v16, v8, a0 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwaddu.vv v24, v8, v16 -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v8, zero -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vredsum.vs v8, v24, v8 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -967,7 +967,7 @@ define i64 @vreduce_add_v1i64(<1 x i64>* %x) { ; RV32-LABEL: vreduce_add_v1i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsrl.vx v9, v8, a0 @@ -977,7 +977,7 @@ ; ; RV64-LABEL: vreduce_add_v1i64: ; RV64: # %bb.0: -; 
RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -989,7 +989,7 @@ define i64 @vwreduce_add_v1i64(<1 x i32>* %x) { ; RV32-LABEL: vwreduce_add_v1i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vle32.v v8, (a0) ; RV32-NEXT: vsext.vf2 v9, v8 ; RV32-NEXT: li a0, 32 @@ -1000,7 +1000,7 @@ ; ; RV64-LABEL: vwreduce_add_v1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vle32.v v8, (a0) ; RV64-NEXT: vsext.vf2 v9, v8 ; RV64-NEXT: vmv.x.s a0, v9 @@ -1014,7 +1014,7 @@ define i64 @vwreduce_uadd_v1i64(<1 x i32>* %x) { ; RV32-LABEL: vwreduce_uadd_v1i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vle32.v v8, (a0) ; RV32-NEXT: vzext.vf2 v9, v8 ; RV32-NEXT: li a0, 32 @@ -1025,7 +1025,7 @@ ; ; RV64-LABEL: vwreduce_uadd_v1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vle32.v v8, (a0) ; RV64-NEXT: vzext.vf2 v9, v8 ; RV64-NEXT: vmv.x.s a0, v9 @@ -1041,20 +1041,20 @@ define i64 @vreduce_add_v2i64(<2 x i64>* %x) { ; RV32-LABEL: vreduce_add_v2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: vmv.s.x v9, zero ; RV32-NEXT: vredsum.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_add_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.s.x v9, zero ; RV64-NEXT: vredsum.vs v8, v8, v9 @@ -1068,27 +1068,27 @@ define i64 @vwreduce_add_v2i64(<2 x i32>* %x) { ; RV32-LABEL: vwreduce_add_v2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vle32.v v8, (a0) ; RV32-NEXT: vmv.s.x v9, zero -; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; RV32-NEXT: vwredsum.vs v8, v8, v9 -; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vwreduce_add_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vle32.v v8, (a0) ; RV64-NEXT: vmv.s.x v9, zero -; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; RV64-NEXT: vwredsum.vs v8, v8, v9 -; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %v = load <2 x i32>, <2 x i32>* %x @@ -1100,27 +1100,27 @@ define i64 @vwreduce_uadd_v2i64(<2 x i32>* %x) { ; RV32-LABEL: vwreduce_uadd_v2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma 
; RV32-NEXT: vle32.v v8, (a0) ; RV32-NEXT: vmv.s.x v9, zero -; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; RV32-NEXT: vwredsumu.vs v8, v8, v9 -; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vwreduce_uadd_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vle32.v v8, (a0) ; RV64-NEXT: vmv.s.x v9, zero -; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; RV64-NEXT: vwredsumu.vs v8, v8, v9 -; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %v = load <2 x i32>, <2 x i32>* %x @@ -1134,20 +1134,20 @@ define i64 @vreduce_add_v4i64(<4 x i64>* %x) { ; RV32-LABEL: vreduce_add_v4i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: vmv.s.x v10, zero ; RV32-NEXT: vredsum.vs v8, v8, v10 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_add_v4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.s.x v10, zero ; RV64-NEXT: vredsum.vs v8, v8, v10 @@ -1161,29 +1161,29 @@ define i64 @vwreduce_add_v4i64(<4 x i32>* %x) { ; RV32-LABEL: vwreduce_add_v4i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vle32.v v8, (a0) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, zero -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vwredsum.vs v8, v8, v9 -; RV32-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vwreduce_add_v4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64-NEXT: vle32.v v8, (a0) -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, zero -; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64-NEXT: vwredsum.vs v8, v8, v9 -; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %v = load <4 x i32>, <4 x i32>* %x @@ -1195,29 +1195,29 @@ define i64 @vwreduce_uadd_v4i64(<4 x i32>* %x) { ; RV32-LABEL: vwreduce_uadd_v4i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vle32.v v8, (a0) -; RV32-NEXT: vsetivli zero, 
1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, zero -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vwredsumu.vs v8, v8, v9 -; RV32-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vwreduce_uadd_v4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64-NEXT: vle32.v v8, (a0) -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, zero -; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64-NEXT: vwredsumu.vs v8, v8, v9 -; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %v = load <4 x i32>, <4 x i32>* %x @@ -1231,20 +1231,20 @@ define i64 @vreduce_add_v8i64(<8 x i64>* %x) { ; RV32-LABEL: vreduce_add_v8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: vmv.s.x v12, zero ; RV32-NEXT: vredsum.vs v8, v8, v12 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_add_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.s.x v12, zero ; RV64-NEXT: vredsum.vs v8, v8, v12 @@ -1258,29 +1258,29 @@ define i64 @vwreduce_add_v8i64(<8 x i32>* %x) { ; RV32-LABEL: vwreduce_add_v8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vle32.v v8, (a0) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.s.x v10, zero -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vwredsum.vs v8, v8, v10 -; RV32-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vwreduce_add_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64-NEXT: vle32.v v8, (a0) -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, zero -; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64-NEXT: vwredsum.vs v8, v8, v10 -; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %v = load <8 x i32>, <8 x i32>* %x @@ -1292,29 +1292,29 @@ define i64 @vwreduce_uadd_v8i64(<8 x i32>* %x) { ; RV32-LABEL: vwreduce_uadd_v8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, 
m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vle32.v v8, (a0) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.s.x v10, zero -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vwredsumu.vs v8, v8, v10 -; RV32-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vwreduce_uadd_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64-NEXT: vle32.v v8, (a0) -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, zero -; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64-NEXT: vwredsumu.vs v8, v8, v10 -; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %v = load <8 x i32>, <8 x i32>* %x @@ -1328,20 +1328,20 @@ define i64 @vreduce_add_v16i64(<16 x i64>* %x) { ; RV32-LABEL: vreduce_add_v16i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: vmv.s.x v16, zero ; RV32-NEXT: vredsum.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_add_v16i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.s.x v16, zero ; RV64-NEXT: vredsum.vs v8, v8, v16 @@ -1355,29 +1355,29 @@ define i64 @vwreduce_add_v16i64(<16 x i32>* %x) { ; RV32-LABEL: vwreduce_add_v16i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; RV32-NEXT: vle32.v v8, (a0) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.s.x v12, zero -; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; RV32-NEXT: vwredsum.vs v8, v8, v12 -; RV32-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vwreduce_add_v16i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; RV64-NEXT: vle32.v v8, (a0) -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v12, zero -; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; RV64-NEXT: vwredsum.vs v8, v8, v12 -; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %v = load <16 x i32>, <16 x i32>* %x @@ -1389,29 
+1389,29 @@ define i64 @vwreduce_uadd_v16i64(<16 x i32>* %x) { ; RV32-LABEL: vwreduce_uadd_v16i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; RV32-NEXT: vle32.v v8, (a0) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.s.x v12, zero -; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; RV32-NEXT: vwredsumu.vs v8, v8, v12 -; RV32-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vwreduce_uadd_v16i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; RV64-NEXT: vle32.v v8, (a0) -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v12, zero -; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; RV64-NEXT: vwredsumu.vs v8, v8, v12 -; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %v = load <16 x i32>, <16 x i32>* %x @@ -1425,7 +1425,7 @@ define i64 @vreduce_add_v32i64(<32 x i64>* %x) { ; RV32-LABEL: vreduce_add_v32i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: addi a0, a0, 128 ; RV32-NEXT: vle64.v v16, (a0) @@ -1434,14 +1434,14 @@ ; RV32-NEXT: vredsum.vs v8, v8, v24 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_add_v32i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: addi a0, a0, 128 ; RV64-NEXT: vle64.v v16, (a0) @@ -1459,18 +1459,18 @@ ; RV32-LABEL: vwreduce_add_v32i64: ; RV32: # %bb.0: ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; RV32-NEXT: vle32.v v8, (a0) -; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; RV32-NEXT: vslidedown.vi v16, v8, 16 -; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; RV32-NEXT: vwadd.vv v24, v8, v16 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.s.x v8, zero -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vredsum.vs v8, v24, v8 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret @@ -1478,15 +1478,15 @@ ; RV64-LABEL: vwreduce_add_v32i64: ; RV64: # %bb.0: ; RV64-NEXT: li a1, 32 -; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; RV64-NEXT: vle32.v v8, (a0) -; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, 
e32, m8, ta, ma ; RV64-NEXT: vslidedown.vi v16, v8, 16 -; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; RV64-NEXT: vwadd.vv v24, v8, v16 -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v8, zero -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vredsum.vs v8, v24, v8 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -1500,18 +1500,18 @@ ; RV32-LABEL: vwreduce_uadd_v32i64: ; RV32: # %bb.0: ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; RV32-NEXT: vle32.v v8, (a0) -; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; RV32-NEXT: vslidedown.vi v16, v8, 16 -; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; RV32-NEXT: vwaddu.vv v24, v8, v16 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.s.x v8, zero -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vredsum.vs v8, v24, v8 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret @@ -1519,15 +1519,15 @@ ; RV64-LABEL: vwreduce_uadd_v32i64: ; RV64: # %bb.0: ; RV64-NEXT: li a1, 32 -; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; RV64-NEXT: vle32.v v8, (a0) -; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; RV64-NEXT: vslidedown.vi v16, v8, 16 -; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; RV64-NEXT: vwaddu.vv v24, v8, v16 -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v8, zero -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vredsum.vs v8, v24, v8 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -1542,7 +1542,7 @@ define i64 @vreduce_add_v64i64(<64 x i64>* %x) nounwind { ; RV32-LABEL: vreduce_add_v64i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: addi a1, a0, 384 ; RV32-NEXT: vle64.v v16, (a1) @@ -1557,14 +1557,14 @@ ; RV32-NEXT: vredsum.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_add_v64i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: addi a1, a0, 384 ; RV64-NEXT: vle64.v v16, (a1) @@ -1594,12 +1594,12 @@ ; RV32-NEXT: sub sp, sp, a1 ; RV32-NEXT: addi a1, a0, 128 ; RV32-NEXT: li a2, 32 -; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: vle32.v v8, (a0) ; RV32-NEXT: addi a0, sp, 16 ; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill ; RV32-NEXT: vle32.v v16, (a1) -; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; 
RV32-NEXT: vslidedown.vi v24, v8, 16 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: li a1, 24 @@ -1613,7 +1613,7 @@ ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 16 ; RV32-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill -; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: li a1, 24 ; RV32-NEXT: mul a0, a0, a1 @@ -1634,7 +1634,7 @@ ; RV32-NEXT: addi a0, sp, 16 ; RV32-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vwadd.vv v0, v8, v16 -; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 4 ; RV32-NEXT: add a0, sp, a0 @@ -1644,7 +1644,7 @@ ; RV32-NEXT: vmv.s.x v16, zero ; RV32-NEXT: vredsum.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a2 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: csrr a2, vlenb @@ -1662,12 +1662,12 @@ ; RV64-NEXT: sub sp, sp, a1 ; RV64-NEXT: addi a1, a0, 128 ; RV64-NEXT: li a2, 32 -; RV64-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; RV64-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV64-NEXT: vle32.v v8, (a0) ; RV64-NEXT: addi a0, sp, 16 ; RV64-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill ; RV64-NEXT: vle32.v v16, (a1) -; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; RV64-NEXT: vslidedown.vi v24, v8, 16 ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: li a1, 24 @@ -1681,7 +1681,7 @@ ; RV64-NEXT: add a0, sp, a0 ; RV64-NEXT: addi a0, a0, 16 ; RV64-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill -; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: li a1, 24 ; RV64-NEXT: mul a0, a0, a1 @@ -1702,7 +1702,7 @@ ; RV64-NEXT: addi a0, sp, 16 ; RV64-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload ; RV64-NEXT: vwadd.vv v0, v8, v16 -; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 4 ; RV64-NEXT: add a0, sp, a0 @@ -1733,12 +1733,12 @@ ; RV32-NEXT: sub sp, sp, a1 ; RV32-NEXT: addi a1, a0, 128 ; RV32-NEXT: li a2, 32 -; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: vle32.v v8, (a0) ; RV32-NEXT: addi a0, sp, 16 ; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill ; RV32-NEXT: vle32.v v16, (a1) -; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; RV32-NEXT: vslidedown.vi v24, v8, 16 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: li a1, 24 @@ -1752,7 +1752,7 @@ ; RV32-NEXT: add a0, sp, a0 ; RV32-NEXT: addi a0, a0, 16 ; RV32-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill -; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: li a1, 24 ; RV32-NEXT: mul a0, a0, a1 @@ -1773,7 +1773,7 @@ ; RV32-NEXT: addi a0, sp, 16 ; RV32-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload ; RV32-NEXT: vwaddu.vv v0, v8, v16 -; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 4 ; RV32-NEXT: add a0, sp, a0 @@ -1783,7 +1783,7 @@ ; RV32-NEXT: vmv.s.x v16, zero ; RV32-NEXT: vredsum.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, 
mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a2 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: csrr a2, vlenb @@ -1801,12 +1801,12 @@ ; RV64-NEXT: sub sp, sp, a1 ; RV64-NEXT: addi a1, a0, 128 ; RV64-NEXT: li a2, 32 -; RV64-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; RV64-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV64-NEXT: vle32.v v8, (a0) ; RV64-NEXT: addi a0, sp, 16 ; RV64-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill ; RV64-NEXT: vle32.v v16, (a1) -; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; RV64-NEXT: vslidedown.vi v24, v8, 16 ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: li a1, 24 @@ -1820,7 +1820,7 @@ ; RV64-NEXT: add a0, sp, a0 ; RV64-NEXT: addi a0, a0, 16 ; RV64-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill -; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: li a1, 24 ; RV64-NEXT: mul a0, a0, a1 @@ -1841,7 +1841,7 @@ ; RV64-NEXT: addi a0, sp, 16 ; RV64-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload ; RV64-NEXT: vwaddu.vv v0, v8, v16 -; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 4 ; RV64-NEXT: add a0, sp, a0 @@ -1867,7 +1867,7 @@ define i8 @vreduce_and_v1i8(<1 x i8>* %x) { ; CHECK-LABEL: vreduce_and_v1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -1881,11 +1881,11 @@ define i8 @vreduce_and_v2i8(<2 x i8>* %x) { ; CHECK-LABEL: vreduce_and_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, -1 -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -1899,11 +1899,11 @@ define i8 @vreduce_and_v4i8(<4 x i8>* %x) { ; CHECK-LABEL: vreduce_and_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, -1 -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -1917,11 +1917,11 @@ define i8 @vreduce_and_v8i8(<8 x i8>* %x) { ; CHECK-LABEL: vreduce_and_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, -1 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -1935,11 +1935,11 @@ define i8 @vreduce_and_v16i8(<16 x i8>* %x) { ; CHECK-LABEL: vreduce_and_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, 
e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, -1 -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -1954,11 +1954,11 @@ ; CHECK-LABEL: vreduce_and_v32i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v10, -1 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vredand.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -1973,11 +1973,11 @@ ; CHECK-LABEL: vreduce_and_v64i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v12, -1 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vredand.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -1992,11 +1992,11 @@ ; CHECK-LABEL: vreduce_and_v128i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 128 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v16, -1 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vredand.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -2011,14 +2011,14 @@ ; CHECK-LABEL: vreduce_and_v256i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 128 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vand.vv v8, v8, v16 -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v16, -1 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vredand.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -2032,7 +2032,7 @@ define i16 @vreduce_and_v1i16(<1 x i16>* %x) { ; CHECK-LABEL: vreduce_and_v1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -2046,11 +2046,11 @@ define i16 @vreduce_and_v2i16(<2 x i16>* %x) { ; CHECK-LABEL: vreduce_and_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, -1 -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -2064,11 +2064,11 @@ define i16 @vreduce_and_v4i16(<4 x i16>* %x) { ; CHECK-LABEL: vreduce_and_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: 
vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, -1 -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -2082,11 +2082,11 @@ define i16 @vreduce_and_v8i16(<8 x i16>* %x) { ; CHECK-LABEL: vreduce_and_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, -1 -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -2100,11 +2100,11 @@ define i16 @vreduce_and_v16i16(<16 x i16>* %x) { ; CHECK-LABEL: vreduce_and_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.i v10, -1 -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vredand.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -2119,11 +2119,11 @@ ; CHECK-LABEL: vreduce_and_v32i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.i v12, -1 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vredand.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -2138,11 +2138,11 @@ ; CHECK-LABEL: vreduce_and_v64i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.i v16, -1 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vredand.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -2157,14 +2157,14 @@ ; CHECK-LABEL: vreduce_and_v128i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vand.vv v8, v8, v16 -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.i v16, -1 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vredand.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -2178,7 +2178,7 @@ define i32 @vreduce_and_v1i32(<1 x i32>* %x) { ; CHECK-LABEL: vreduce_and_v1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -2192,11 +2192,11 @@ define 
i32 @vreduce_and_v2i32(<2 x i32>* %x) { ; CHECK-LABEL: vreduce_and_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, -1 -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -2210,11 +2210,11 @@ define i32 @vreduce_and_v4i32(<4 x i32>* %x) { ; CHECK-LABEL: vreduce_and_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, -1 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -2228,11 +2228,11 @@ define i32 @vreduce_and_v8i32(<8 x i32>* %x) { ; CHECK-LABEL: vreduce_and_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v10, -1 -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vredand.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -2246,11 +2246,11 @@ define i32 @vreduce_and_v16i32(<16 x i32>* %x) { ; CHECK-LABEL: vreduce_and_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v12, -1 -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vredand.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -2265,11 +2265,11 @@ ; CHECK-LABEL: vreduce_and_v32i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v16, -1 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vredand.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -2284,14 +2284,14 @@ ; CHECK-LABEL: vreduce_and_v64i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vand.vv v8, v8, v16 -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v16, -1 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vredand.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -2305,7 +2305,7 @@ define i64 @vreduce_and_v1i64(<1 x i64>* %x) { ; RV32-LABEL: vreduce_and_v1i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, 
e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsrl.vx v9, v8, a0 @@ -2315,7 +2315,7 @@ ; ; RV64-LABEL: vreduce_and_v1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -2329,26 +2329,26 @@ define i64 @vreduce_and_v2i64(<2 x i64>* %x) { ; RV32-LABEL: vreduce_and_v2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.v.i v9, -1 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vredand.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_and_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.v.i v9, -1 -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vredand.vs v8, v8, v9 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -2362,26 +2362,26 @@ define i64 @vreduce_and_v4i64(<4 x i64>* %x) { ; RV32-LABEL: vreduce_and_v4i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.v.i v10, -1 -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vredand.vs v8, v8, v10 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_and_v4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.v.i v10, -1 -; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-NEXT: vredand.vs v8, v8, v10 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -2395,26 +2395,26 @@ define i64 @vreduce_and_v8i64(<8 x i64>* %x) { ; RV32-LABEL: vreduce_and_v8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.v.i v12, -1 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vredand.vs v8, v8, v12 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: 
vreduce_and_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.v.i v12, -1 -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vredand.vs v8, v8, v12 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -2428,26 +2428,26 @@ define i64 @vreduce_and_v16i64(<16 x i64>* %x) { ; RV32-LABEL: vreduce_and_v16i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.v.i v16, -1 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vredand.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_and_v16i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.v.i v16, -1 -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vredand.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -2461,32 +2461,32 @@ define i64 @vreduce_and_v32i64(<32 x i64>* %x) { ; RV32-LABEL: vreduce_and_v32i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: addi a0, a0, 128 ; RV32-NEXT: vle64.v v16, (a0) ; RV32-NEXT: vand.vv v8, v8, v16 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.v.i v16, -1 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vredand.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_and_v32i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: addi a0, a0, 128 ; RV64-NEXT: vle64.v v16, (a0) ; RV64-NEXT: vand.vv v8, v8, v16 -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.v.i v16, -1 -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vredand.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -2500,7 +2500,7 @@ define i64 @vreduce_and_v64i64(<64 x i64>* %x) nounwind { ; RV32-LABEL: vreduce_and_v64i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: addi a1, a0, 384 ; RV32-NEXT: vle64.v v16, (a1) @@ -2511,20 +2511,20 @@ ; RV32-NEXT: vand.vv v16, v24, v16 ; RV32-NEXT: vand.vv v8, v8, v0 ; RV32-NEXT: vand.vv v8, v8, v16 -; RV32-NEXT: 
vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.v.i v16, -1 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vredand.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_and_v64i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: addi a1, a0, 384 ; RV64-NEXT: vle64.v v16, (a1) @@ -2535,9 +2535,9 @@ ; RV64-NEXT: vand.vv v16, v24, v16 ; RV64-NEXT: vand.vv v8, v8, v0 ; RV64-NEXT: vand.vv v8, v8, v16 -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.v.i v16, -1 -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vredand.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -2551,7 +2551,7 @@ define i8 @vreduce_or_v1i8(<1 x i8>* %x) { ; CHECK-LABEL: vreduce_or_v1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -2565,7 +2565,7 @@ define i8 @vreduce_or_v2i8(<2 x i8>* %x) { ; CHECK-LABEL: vreduce_or_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredor.vs v8, v8, v9 @@ -2581,7 +2581,7 @@ define i8 @vreduce_or_v4i8(<4 x i8>* %x) { ; CHECK-LABEL: vreduce_or_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredor.vs v8, v8, v9 @@ -2597,7 +2597,7 @@ define i8 @vreduce_or_v8i8(<8 x i8>* %x) { ; CHECK-LABEL: vreduce_or_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredor.vs v8, v8, v9 @@ -2613,7 +2613,7 @@ define i8 @vreduce_or_v16i8(<16 x i8>* %x) { ; CHECK-LABEL: vreduce_or_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredor.vs v8, v8, v9 @@ -2630,11 +2630,11 @@ ; CHECK-LABEL: vreduce_or_v32i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v10, zero -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vredor.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -2649,11 +2649,11 @@ ; CHECK-LABEL: vreduce_or_v64i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v12, zero -; 
CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vredor.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -2668,11 +2668,11 @@ ; CHECK-LABEL: vreduce_or_v128i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 128 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v16, zero -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vredor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -2687,14 +2687,14 @@ ; CHECK-LABEL: vreduce_or_v256i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 128 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vor.vv v8, v8, v16 -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v16, zero -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vredor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -2708,7 +2708,7 @@ define i16 @vreduce_or_v1i16(<1 x i16>* %x) { ; CHECK-LABEL: vreduce_or_v1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -2722,7 +2722,7 @@ define i16 @vreduce_or_v2i16(<2 x i16>* %x) { ; CHECK-LABEL: vreduce_or_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredor.vs v8, v8, v9 @@ -2738,7 +2738,7 @@ define i16 @vreduce_or_v4i16(<4 x i16>* %x) { ; CHECK-LABEL: vreduce_or_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredor.vs v8, v8, v9 @@ -2754,7 +2754,7 @@ define i16 @vreduce_or_v8i16(<8 x i16>* %x) { ; CHECK-LABEL: vreduce_or_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredor.vs v8, v8, v9 @@ -2770,7 +2770,7 @@ define i16 @vreduce_or_v16i16(<16 x i16>* %x) { ; CHECK-LABEL: vreduce_or_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.s.x v10, zero ; CHECK-NEXT: vredor.vs v8, v8, v10 @@ -2787,11 +2787,11 @@ ; CHECK-LABEL: vreduce_or_v32i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v12, zero -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vredor.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -2806,11 +2806,11 @@ ; CHECK-LABEL: vreduce_or_v64i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 -; 
CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v16, zero -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vredor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -2825,14 +2825,14 @@ ; CHECK-LABEL: vreduce_or_v128i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vor.vv v8, v8, v16 -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v16, zero -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vredor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -2846,7 +2846,7 @@ define i32 @vreduce_or_v1i32(<1 x i32>* %x) { ; CHECK-LABEL: vreduce_or_v1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -2860,7 +2860,7 @@ define i32 @vreduce_or_v2i32(<2 x i32>* %x) { ; CHECK-LABEL: vreduce_or_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredor.vs v8, v8, v9 @@ -2876,7 +2876,7 @@ define i32 @vreduce_or_v4i32(<4 x i32>* %x) { ; CHECK-LABEL: vreduce_or_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredor.vs v8, v8, v9 @@ -2892,7 +2892,7 @@ define i32 @vreduce_or_v8i32(<8 x i32>* %x) { ; CHECK-LABEL: vreduce_or_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.s.x v10, zero ; CHECK-NEXT: vredor.vs v8, v8, v10 @@ -2908,7 +2908,7 @@ define i32 @vreduce_or_v16i32(<16 x i32>* %x) { ; CHECK-LABEL: vreduce_or_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.s.x v12, zero ; CHECK-NEXT: vredor.vs v8, v8, v12 @@ -2925,11 +2925,11 @@ ; CHECK-LABEL: vreduce_or_v32i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v16, zero -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vredor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -2944,14 +2944,14 @@ ; CHECK-LABEL: vreduce_or_v64i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vor.vv v8, v8, v16 -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; 
CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v16, zero -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vredor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -2965,7 +2965,7 @@ define i64 @vreduce_or_v1i64(<1 x i64>* %x) { ; RV32-LABEL: vreduce_or_v1i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsrl.vx v9, v8, a0 @@ -2975,7 +2975,7 @@ ; ; RV64-LABEL: vreduce_or_v1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -2989,20 +2989,20 @@ define i64 @vreduce_or_v2i64(<2 x i64>* %x) { ; RV32-LABEL: vreduce_or_v2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: vmv.s.x v9, zero ; RV32-NEXT: vredor.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_or_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.s.x v9, zero ; RV64-NEXT: vredor.vs v8, v8, v9 @@ -3018,20 +3018,20 @@ define i64 @vreduce_or_v4i64(<4 x i64>* %x) { ; RV32-LABEL: vreduce_or_v4i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: vmv.s.x v10, zero ; RV32-NEXT: vredor.vs v8, v8, v10 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_or_v4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.s.x v10, zero ; RV64-NEXT: vredor.vs v8, v8, v10 @@ -3047,20 +3047,20 @@ define i64 @vreduce_or_v8i64(<8 x i64>* %x) { ; RV32-LABEL: vreduce_or_v8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: vmv.s.x v12, zero ; RV32-NEXT: vredor.vs v8, v8, v12 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_or_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.s.x v12, zero ; RV64-NEXT: vredor.vs v8, v8, v12 @@ -3076,20 +3076,20 @@ define i64 @vreduce_or_v16i64(<16 x i64>* %x) { ; RV32-LABEL: vreduce_or_v16i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: vmv.s.x v16, zero ; RV32-NEXT: vredor.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; 
RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_or_v16i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.s.x v16, zero ; RV64-NEXT: vredor.vs v8, v8, v16 @@ -3105,7 +3105,7 @@ define i64 @vreduce_or_v32i64(<32 x i64>* %x) { ; RV32-LABEL: vreduce_or_v32i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: addi a0, a0, 128 ; RV32-NEXT: vle64.v v16, (a0) @@ -3114,14 +3114,14 @@ ; RV32-NEXT: vredor.vs v8, v8, v24 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_or_v32i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: addi a0, a0, 128 ; RV64-NEXT: vle64.v v16, (a0) @@ -3140,7 +3140,7 @@ define i64 @vreduce_or_v64i64(<64 x i64>* %x) nounwind { ; RV32-LABEL: vreduce_or_v64i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: addi a1, a0, 384 ; RV32-NEXT: vle64.v v16, (a1) @@ -3155,14 +3155,14 @@ ; RV32-NEXT: vredor.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_or_v64i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: addi a1, a0, 384 ; RV64-NEXT: vle64.v v16, (a1) @@ -3187,7 +3187,7 @@ define i8 @vreduce_xor_v1i8(<1 x i8>* %x) { ; CHECK-LABEL: vreduce_xor_v1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -3201,7 +3201,7 @@ define i8 @vreduce_xor_v2i8(<2 x i8>* %x) { ; CHECK-LABEL: vreduce_xor_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredxor.vs v8, v8, v9 @@ -3217,7 +3217,7 @@ define i8 @vreduce_xor_v4i8(<4 x i8>* %x) { ; CHECK-LABEL: vreduce_xor_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredxor.vs v8, v8, v9 @@ -3233,7 +3233,7 @@ define i8 @vreduce_xor_v8i8(<8 x i8>* %x) { ; CHECK-LABEL: vreduce_xor_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredxor.vs v8, v8, v9 @@ -3249,7 +3249,7 @@ define i8 @vreduce_xor_v16i8(<16 x i8>* %x) { ; CHECK-LABEL: vreduce_xor_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; 
CHECK-NEXT: vredxor.vs v8, v8, v9 @@ -3266,11 +3266,11 @@ ; CHECK-LABEL: vreduce_xor_v32i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v10, zero -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vredxor.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -3285,11 +3285,11 @@ ; CHECK-LABEL: vreduce_xor_v64i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v12, zero -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vredxor.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -3304,11 +3304,11 @@ ; CHECK-LABEL: vreduce_xor_v128i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 128 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v16, zero -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vredxor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -3323,14 +3323,14 @@ ; CHECK-LABEL: vreduce_xor_v256i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 128 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vxor.vv v8, v8, v16 -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v16, zero -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vredxor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -3344,7 +3344,7 @@ define i16 @vreduce_xor_v1i16(<1 x i16>* %x) { ; CHECK-LABEL: vreduce_xor_v1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -3358,7 +3358,7 @@ define i16 @vreduce_xor_v2i16(<2 x i16>* %x) { ; CHECK-LABEL: vreduce_xor_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredxor.vs v8, v8, v9 @@ -3374,7 +3374,7 @@ define i16 @vreduce_xor_v4i16(<4 x i16>* %x) { ; CHECK-LABEL: vreduce_xor_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredxor.vs v8, v8, v9 @@ -3390,7 +3390,7 @@ define i16 @vreduce_xor_v8i16(<8 x i16>* %x) { ; CHECK-LABEL: vreduce_xor_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredxor.vs v8, v8, v9 @@ -3406,7 +3406,7 @@ define 
i16 @vreduce_xor_v16i16(<16 x i16>* %x) { ; CHECK-LABEL: vreduce_xor_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.s.x v10, zero ; CHECK-NEXT: vredxor.vs v8, v8, v10 @@ -3423,11 +3423,11 @@ ; CHECK-LABEL: vreduce_xor_v32i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v12, zero -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vredxor.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -3442,11 +3442,11 @@ ; CHECK-LABEL: vreduce_xor_v64i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v16, zero -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vredxor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -3461,14 +3461,14 @@ ; CHECK-LABEL: vreduce_xor_v128i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vxor.vv v8, v8, v16 -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v16, zero -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vredxor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -3482,7 +3482,7 @@ define i32 @vreduce_xor_v1i32(<1 x i32>* %x) { ; CHECK-LABEL: vreduce_xor_v1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -3496,7 +3496,7 @@ define i32 @vreduce_xor_v2i32(<2 x i32>* %x) { ; CHECK-LABEL: vreduce_xor_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredxor.vs v8, v8, v9 @@ -3512,7 +3512,7 @@ define i32 @vreduce_xor_v4i32(<4 x i32>* %x) { ; CHECK-LABEL: vreduce_xor_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredxor.vs v8, v8, v9 @@ -3528,7 +3528,7 @@ define i32 @vreduce_xor_v8i32(<8 x i32>* %x) { ; CHECK-LABEL: vreduce_xor_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.s.x v10, zero ; CHECK-NEXT: vredxor.vs v8, v8, v10 @@ -3544,7 +3544,7 @@ define i32 @vreduce_xor_v16i32(<16 x i32>* %x) { ; CHECK-LABEL: vreduce_xor_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.s.x v12, 
zero ; CHECK-NEXT: vredxor.vs v8, v8, v12 @@ -3561,11 +3561,11 @@ ; CHECK-LABEL: vreduce_xor_v32i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v16, zero -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vredxor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -3580,14 +3580,14 @@ ; CHECK-LABEL: vreduce_xor_v64i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vxor.vv v8, v8, v16 -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v16, zero -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vredxor.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -3601,7 +3601,7 @@ define i64 @vreduce_xor_v1i64(<1 x i64>* %x) { ; RV32-LABEL: vreduce_xor_v1i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsrl.vx v9, v8, a0 @@ -3611,7 +3611,7 @@ ; ; RV64-LABEL: vreduce_xor_v1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -3625,20 +3625,20 @@ define i64 @vreduce_xor_v2i64(<2 x i64>* %x) { ; RV32-LABEL: vreduce_xor_v2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: vmv.s.x v9, zero ; RV32-NEXT: vredxor.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_xor_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.s.x v9, zero ; RV64-NEXT: vredxor.vs v8, v8, v9 @@ -3654,20 +3654,20 @@ define i64 @vreduce_xor_v4i64(<4 x i64>* %x) { ; RV32-LABEL: vreduce_xor_v4i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: vmv.s.x v10, zero ; RV32-NEXT: vredxor.vs v8, v8, v10 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_xor_v4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.s.x v10, zero ; RV64-NEXT: vredxor.vs v8, v8, v10 @@ -3683,20 +3683,20 @@ define i64 @vreduce_xor_v8i64(<8 x i64>* %x) { ; RV32-LABEL: vreduce_xor_v8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; 
RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: vmv.s.x v12, zero ; RV32-NEXT: vredxor.vs v8, v8, v12 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_xor_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.s.x v12, zero ; RV64-NEXT: vredxor.vs v8, v8, v12 @@ -3712,20 +3712,20 @@ define i64 @vreduce_xor_v16i64(<16 x i64>* %x) { ; RV32-LABEL: vreduce_xor_v16i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: vmv.s.x v16, zero ; RV32-NEXT: vredxor.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_xor_v16i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.s.x v16, zero ; RV64-NEXT: vredxor.vs v8, v8, v16 @@ -3741,7 +3741,7 @@ define i64 @vreduce_xor_v32i64(<32 x i64>* %x) { ; RV32-LABEL: vreduce_xor_v32i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: addi a0, a0, 128 ; RV32-NEXT: vle64.v v16, (a0) @@ -3750,14 +3750,14 @@ ; RV32-NEXT: vredxor.vs v8, v8, v24 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_xor_v32i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: addi a0, a0, 128 ; RV64-NEXT: vle64.v v16, (a0) @@ -3776,7 +3776,7 @@ define i64 @vreduce_xor_v64i64(<64 x i64>* %x) nounwind { ; RV32-LABEL: vreduce_xor_v64i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: addi a1, a0, 384 ; RV32-NEXT: vle64.v v16, (a1) @@ -3791,14 +3791,14 @@ ; RV32-NEXT: vredxor.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_xor_v64i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: addi a1, a0, 384 ; RV64-NEXT: vle64.v v16, (a1) @@ -3823,7 +3823,7 @@ define i8 @vreduce_smin_v1i8(<1 x i8>* %x) { ; CHECK-LABEL: vreduce_smin_v1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -3837,7 +3837,7 @@ define i8 @vreduce_smin_v2i8(<2 x i8>* %x) { ; CHECK-LABEL: vreduce_smin_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; 
CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, 127 ; CHECK-NEXT: vmv.s.x v9, a0 @@ -3854,7 +3854,7 @@ define i8 @vreduce_smin_v4i8(<4 x i8>* %x) { ; CHECK-LABEL: vreduce_smin_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, 127 ; CHECK-NEXT: vmv.s.x v9, a0 @@ -3871,7 +3871,7 @@ define i8 @vreduce_smin_v8i8(<8 x i8>* %x) { ; CHECK-LABEL: vreduce_smin_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, 127 ; CHECK-NEXT: vmv.s.x v9, a0 @@ -3888,7 +3888,7 @@ define i8 @vreduce_smin_v16i8(<16 x i8>* %x) { ; CHECK-LABEL: vreduce_smin_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, 127 ; CHECK-NEXT: vmv.s.x v9, a0 @@ -3906,12 +3906,12 @@ ; CHECK-LABEL: vreduce_smin_v32i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, 127 -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vredmin.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -3926,12 +3926,12 @@ ; CHECK-LABEL: vreduce_smin_v64i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, 127 -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v12, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vredmin.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -3946,12 +3946,12 @@ ; CHECK-LABEL: vreduce_smin_v128i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 128 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, 127 -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v16, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vredmin.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -3966,15 +3966,15 @@ ; CHECK-LABEL: vreduce_smin_v256i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 128 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vmin.vv v8, v8, v16 ; CHECK-NEXT: li a0, 127 -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v16, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vredmin.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -3988,7 +3988,7 @@ define i16 @vreduce_smin_v1i16(<1 x i16>* %x) { ; CHECK-LABEL: vreduce_smin_v1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; 
CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -4002,7 +4002,7 @@ define i16 @vreduce_smin_v2i16(<2 x i16>* %x) { ; RV32-LABEL: vreduce_smin_v2i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; RV32-NEXT: vle16.v v8, (a0) ; RV32-NEXT: lui a0, 8 ; RV32-NEXT: addi a0, a0, -1 @@ -4013,7 +4013,7 @@ ; ; RV64-LABEL: vreduce_smin_v2i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; RV64-NEXT: vle16.v v8, (a0) ; RV64-NEXT: lui a0, 8 ; RV64-NEXT: addiw a0, a0, -1 @@ -4031,7 +4031,7 @@ define i16 @vreduce_smin_v4i16(<4 x i16>* %x) { ; RV32-LABEL: vreduce_smin_v4i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV32-NEXT: vle16.v v8, (a0) ; RV32-NEXT: lui a0, 8 ; RV32-NEXT: addi a0, a0, -1 @@ -4042,7 +4042,7 @@ ; ; RV64-LABEL: vreduce_smin_v4i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV64-NEXT: vle16.v v8, (a0) ; RV64-NEXT: lui a0, 8 ; RV64-NEXT: addiw a0, a0, -1 @@ -4060,7 +4060,7 @@ define i16 @vreduce_smin_v8i16(<8 x i16>* %x) { ; RV32-LABEL: vreduce_smin_v8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV32-NEXT: vle16.v v8, (a0) ; RV32-NEXT: lui a0, 8 ; RV32-NEXT: addi a0, a0, -1 @@ -4071,7 +4071,7 @@ ; ; RV64-LABEL: vreduce_smin_v8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV64-NEXT: vle16.v v8, (a0) ; RV64-NEXT: lui a0, 8 ; RV64-NEXT: addiw a0, a0, -1 @@ -4089,7 +4089,7 @@ define i16 @vreduce_smin_v16i16(<16 x i16>* %x) { ; RV32-LABEL: vreduce_smin_v16i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; RV32-NEXT: vle16.v v8, (a0) ; RV32-NEXT: lui a0, 8 ; RV32-NEXT: addi a0, a0, -1 @@ -4100,7 +4100,7 @@ ; ; RV64-LABEL: vreduce_smin_v16i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; RV64-NEXT: vle16.v v8, (a0) ; RV64-NEXT: lui a0, 8 ; RV64-NEXT: addiw a0, a0, -1 @@ -4119,13 +4119,13 @@ ; RV32-LABEL: vreduce_smin_v32i16: ; RV32: # %bb.0: ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; RV32-NEXT: vle16.v v8, (a0) ; RV32-NEXT: lui a0, 8 ; RV32-NEXT: addi a0, a0, -1 -; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV32-NEXT: vmv.s.x v12, a0 -; RV32-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; RV32-NEXT: vredmin.vs v8, v8, v12 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: ret @@ -4133,13 +4133,13 @@ ; RV64-LABEL: vreduce_smin_v32i16: ; RV64: # %bb.0: ; RV64-NEXT: li a1, 32 -; RV64-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; RV64-NEXT: vle16.v v8, (a0) ; RV64-NEXT: lui a0, 8 ; RV64-NEXT: addiw a0, a0, -1 -; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64-NEXT: vmv.s.x v12, a0 -; RV64-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; RV64-NEXT: vredmin.vs v8, v8, v12 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -4154,13 +4154,13 @@ ; RV32-LABEL: vreduce_smin_v64i16: ; RV32: # %bb.0: ; RV32-NEXT: li 
a1, 64 -; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; RV32-NEXT: vle16.v v8, (a0) ; RV32-NEXT: lui a0, 8 ; RV32-NEXT: addi a0, a0, -1 -; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV32-NEXT: vmv.s.x v16, a0 -; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; RV32-NEXT: vredmin.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: ret @@ -4168,13 +4168,13 @@ ; RV64-LABEL: vreduce_smin_v64i16: ; RV64: # %bb.0: ; RV64-NEXT: li a1, 64 -; RV64-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; RV64-NEXT: vle16.v v8, (a0) ; RV64-NEXT: lui a0, 8 ; RV64-NEXT: addiw a0, a0, -1 -; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64-NEXT: vmv.s.x v16, a0 -; RV64-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; RV64-NEXT: vredmin.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -4189,16 +4189,16 @@ ; RV32-LABEL: vreduce_smin_v128i16: ; RV32: # %bb.0: ; RV32-NEXT: li a1, 64 -; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; RV32-NEXT: vle16.v v8, (a0) ; RV32-NEXT: addi a0, a0, 128 ; RV32-NEXT: vle16.v v16, (a0) ; RV32-NEXT: vmin.vv v8, v8, v16 ; RV32-NEXT: lui a0, 8 ; RV32-NEXT: addi a0, a0, -1 -; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV32-NEXT: vmv.s.x v16, a0 -; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; RV32-NEXT: vredmin.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: ret @@ -4206,16 +4206,16 @@ ; RV64-LABEL: vreduce_smin_v128i16: ; RV64: # %bb.0: ; RV64-NEXT: li a1, 64 -; RV64-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; RV64-NEXT: vle16.v v8, (a0) ; RV64-NEXT: addi a0, a0, 128 ; RV64-NEXT: vle16.v v16, (a0) ; RV64-NEXT: vmin.vv v8, v8, v16 ; RV64-NEXT: lui a0, 8 ; RV64-NEXT: addiw a0, a0, -1 -; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64-NEXT: vmv.s.x v16, a0 -; RV64-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; RV64-NEXT: vredmin.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -4229,7 +4229,7 @@ define i32 @vreduce_smin_v1i32(<1 x i32>* %x) { ; CHECK-LABEL: vreduce_smin_v1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -4243,7 +4243,7 @@ define i32 @vreduce_smin_v2i32(<2 x i32>* %x) { ; RV32-LABEL: vreduce_smin_v2i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32-NEXT: vle32.v v8, (a0) ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: addi a0, a0, -1 @@ -4254,7 +4254,7 @@ ; ; RV64-LABEL: vreduce_smin_v2i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV64-NEXT: vle32.v v8, (a0) ; RV64-NEXT: lui a0, 524288 ; RV64-NEXT: addiw a0, a0, -1 @@ -4272,7 +4272,7 @@ define i32 @vreduce_smin_v4i32(<4 x i32>* %x) { ; RV32-LABEL: vreduce_smin_v4i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vle32.v v8, (a0) ; RV32-NEXT: lui a0, 
524288 ; RV32-NEXT: addi a0, a0, -1 @@ -4283,7 +4283,7 @@ ; ; RV64-LABEL: vreduce_smin_v4i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64-NEXT: vle32.v v8, (a0) ; RV64-NEXT: lui a0, 524288 ; RV64-NEXT: addiw a0, a0, -1 @@ -4301,7 +4301,7 @@ define i32 @vreduce_smin_v8i32(<8 x i32>* %x) { ; RV32-LABEL: vreduce_smin_v8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vle32.v v8, (a0) ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: addi a0, a0, -1 @@ -4312,7 +4312,7 @@ ; ; RV64-LABEL: vreduce_smin_v8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV64-NEXT: vle32.v v8, (a0) ; RV64-NEXT: lui a0, 524288 ; RV64-NEXT: addiw a0, a0, -1 @@ -4330,7 +4330,7 @@ define i32 @vreduce_smin_v16i32(<16 x i32>* %x) { ; RV32-LABEL: vreduce_smin_v16i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; RV32-NEXT: vle32.v v8, (a0) ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: addi a0, a0, -1 @@ -4341,7 +4341,7 @@ ; ; RV64-LABEL: vreduce_smin_v16i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; RV64-NEXT: vle32.v v8, (a0) ; RV64-NEXT: lui a0, 524288 ; RV64-NEXT: addiw a0, a0, -1 @@ -4360,13 +4360,13 @@ ; RV32-LABEL: vreduce_smin_v32i32: ; RV32: # %bb.0: ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; RV32-NEXT: vle32.v v8, (a0) ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: addi a0, a0, -1 -; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32-NEXT: vmv.s.x v16, a0 -; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; RV32-NEXT: vredmin.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: ret @@ -4374,13 +4374,13 @@ ; RV64-LABEL: vreduce_smin_v32i32: ; RV64: # %bb.0: ; RV64-NEXT: li a1, 32 -; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; RV64-NEXT: vle32.v v8, (a0) ; RV64-NEXT: lui a0, 524288 ; RV64-NEXT: addiw a0, a0, -1 -; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64-NEXT: vmv.s.x v16, a0 -; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; RV64-NEXT: vredmin.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -4395,16 +4395,16 @@ ; RV32-LABEL: vreduce_smin_v64i32: ; RV32: # %bb.0: ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; RV32-NEXT: vle32.v v8, (a0) ; RV32-NEXT: addi a0, a0, 128 ; RV32-NEXT: vle32.v v16, (a0) ; RV32-NEXT: vmin.vv v8, v8, v16 ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: addi a0, a0, -1 -; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32-NEXT: vmv.s.x v16, a0 -; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; RV32-NEXT: vredmin.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: ret @@ -4412,16 +4412,16 @@ ; RV64-LABEL: vreduce_smin_v64i32: ; RV64: # %bb.0: ; RV64-NEXT: li a1, 32 -; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; RV64-NEXT: vle32.v v8, (a0) ; RV64-NEXT: addi a0, a0, 128 ; RV64-NEXT: vle32.v 
v16, (a0) ; RV64-NEXT: vmin.vv v8, v8, v16 ; RV64-NEXT: lui a0, 524288 ; RV64-NEXT: addiw a0, a0, -1 -; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64-NEXT: vmv.s.x v16, a0 -; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; RV64-NEXT: vredmin.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -4435,7 +4435,7 @@ define i64 @vreduce_smin_v1i64(<1 x i64>* %x) { ; RV32-LABEL: vreduce_smin_v1i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsrl.vx v9, v8, a0 @@ -4445,7 +4445,7 @@ ; ; RV64-LABEL: vreduce_smin_v1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -4461,7 +4461,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: li a0, -1 ; RV32-NEXT: sw a0, 8(sp) @@ -4469,13 +4469,13 @@ ; RV32-NEXT: addi a0, a0, -1 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vredmin.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -4483,7 +4483,7 @@ ; ; RV64-LABEL: vreduce_smin_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: li a0, -1 ; RV64-NEXT: srli a0, a0, 1 @@ -4503,7 +4503,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: li a0, -1 ; RV32-NEXT: sw a0, 8(sp) @@ -4511,13 +4511,13 @@ ; RV32-NEXT: addi a0, a0, -1 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vredmin.vs v8, v8, v10 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -4525,7 +4525,7 @@ ; ; RV64-LABEL: vreduce_smin_v4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: li a0, -1 ; RV64-NEXT: srli a0, a0, 1 @@ -4545,7 +4545,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: li a0, -1 ; RV32-NEXT: sw a0, 8(sp) @@ -4553,13 +4553,13 @@ ; RV32-NEXT: addi 
a0, a0, -1 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vredmin.vs v8, v8, v12 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -4567,7 +4567,7 @@ ; ; RV64-LABEL: vreduce_smin_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: li a0, -1 ; RV64-NEXT: srli a0, a0, 1 @@ -4587,7 +4587,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: li a0, -1 ; RV32-NEXT: sw a0, 8(sp) @@ -4595,13 +4595,13 @@ ; RV32-NEXT: addi a0, a0, -1 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vredmin.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -4609,7 +4609,7 @@ ; ; RV64-LABEL: vreduce_smin_v16i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: li a0, -1 ; RV64-NEXT: srli a0, a0, 1 @@ -4629,7 +4629,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: addi a0, a0, 128 ; RV32-NEXT: vle64.v v16, (a0) @@ -4640,13 +4640,13 @@ ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: vmin.vv v8, v8, v16 ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vredmin.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -4654,7 +4654,7 @@ ; ; RV64-LABEL: vreduce_smin_v32i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: addi a0, a0, 128 ; RV64-NEXT: vle64.v v16, (a0) @@ -4676,7 +4676,7 @@ ; RV32-LABEL: vreduce_smin_v64i64: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: addi a1, a0, 256 ; RV32-NEXT: vle64.v v16, (a1) @@ -4693,13 +4693,13 @@ ; RV32-NEXT: vmin.vv v8, v8, v16 ; RV32-NEXT: vmin.vv v8, v8, v24 ; RV32-NEXT: addi a0, sp, 8 
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vredmin.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -4707,7 +4707,7 @@ ; ; RV64-LABEL: vreduce_smin_v64i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: addi a1, a0, 384 ; RV64-NEXT: vle64.v v16, (a1) @@ -4734,7 +4734,7 @@ define i8 @vreduce_smax_v1i8(<1 x i8>* %x) { ; CHECK-LABEL: vreduce_smax_v1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -4748,7 +4748,7 @@ define i8 @vreduce_smax_v2i8(<2 x i8>* %x) { ; CHECK-LABEL: vreduce_smax_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, -128 ; CHECK-NEXT: vmv.s.x v9, a0 @@ -4765,7 +4765,7 @@ define i8 @vreduce_smax_v4i8(<4 x i8>* %x) { ; CHECK-LABEL: vreduce_smax_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, -128 ; CHECK-NEXT: vmv.s.x v9, a0 @@ -4782,7 +4782,7 @@ define i8 @vreduce_smax_v8i8(<8 x i8>* %x) { ; CHECK-LABEL: vreduce_smax_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, -128 ; CHECK-NEXT: vmv.s.x v9, a0 @@ -4799,7 +4799,7 @@ define i8 @vreduce_smax_v16i8(<16 x i8>* %x) { ; CHECK-LABEL: vreduce_smax_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, -128 ; CHECK-NEXT: vmv.s.x v9, a0 @@ -4817,12 +4817,12 @@ ; CHECK-LABEL: vreduce_smax_v32i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, -128 -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vredmax.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -4837,12 +4837,12 @@ ; CHECK-LABEL: vreduce_smax_v64i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, -128 -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v12, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vredmax.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -4857,12 +4857,12 @@ ; CHECK-LABEL: vreduce_smax_v128i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 128 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; 
CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, -128 -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v16, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vredmax.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -4877,15 +4877,15 @@ ; CHECK-LABEL: vreduce_smax_v256i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 128 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vmax.vv v8, v8, v16 ; CHECK-NEXT: li a0, -128 -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v16, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vredmax.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -4899,7 +4899,7 @@ define i16 @vreduce_smax_v1i16(<1 x i16>* %x) { ; CHECK-LABEL: vreduce_smax_v1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -4913,7 +4913,7 @@ define i16 @vreduce_smax_v2i16(<2 x i16>* %x) { ; CHECK-LABEL: vreduce_smax_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: lui a0, 1048568 ; CHECK-NEXT: vmv.s.x v9, a0 @@ -4930,7 +4930,7 @@ define i16 @vreduce_smax_v4i16(<4 x i16>* %x) { ; CHECK-LABEL: vreduce_smax_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: lui a0, 1048568 ; CHECK-NEXT: vmv.s.x v9, a0 @@ -4947,7 +4947,7 @@ define i16 @vreduce_smax_v8i16(<8 x i16>* %x) { ; CHECK-LABEL: vreduce_smax_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: lui a0, 1048568 ; CHECK-NEXT: vmv.s.x v9, a0 @@ -4964,7 +4964,7 @@ define i16 @vreduce_smax_v16i16(<16 x i16>* %x) { ; CHECK-LABEL: vreduce_smax_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: lui a0, 1048568 ; CHECK-NEXT: vmv.s.x v10, a0 @@ -4982,12 +4982,12 @@ ; CHECK-LABEL: vreduce_smax_v32i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: lui a0, 1048568 -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v12, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vredmax.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -5002,12 +5002,12 @@ ; CHECK-LABEL: vreduce_smax_v64i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: lui a0, 1048568 -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; 
CHECK-NEXT: vmv.s.x v16, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vredmax.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -5022,15 +5022,15 @@ ; CHECK-LABEL: vreduce_smax_v128i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vmax.vv v8, v8, v16 ; CHECK-NEXT: lui a0, 1048568 -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v16, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vredmax.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -5044,7 +5044,7 @@ define i32 @vreduce_smax_v1i32(<1 x i32>* %x) { ; CHECK-LABEL: vreduce_smax_v1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -5058,7 +5058,7 @@ define i32 @vreduce_smax_v2i32(<2 x i32>* %x) { ; CHECK-LABEL: vreduce_smax_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: lui a0, 524288 ; CHECK-NEXT: vmv.s.x v9, a0 @@ -5075,7 +5075,7 @@ define i32 @vreduce_smax_v4i32(<4 x i32>* %x) { ; CHECK-LABEL: vreduce_smax_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: lui a0, 524288 ; CHECK-NEXT: vmv.s.x v9, a0 @@ -5092,7 +5092,7 @@ define i32 @vreduce_smax_v8i32(<8 x i32>* %x) { ; CHECK-LABEL: vreduce_smax_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: lui a0, 524288 ; CHECK-NEXT: vmv.s.x v10, a0 @@ -5109,7 +5109,7 @@ define i32 @vreduce_smax_v16i32(<16 x i32>* %x) { ; CHECK-LABEL: vreduce_smax_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: lui a0, 524288 ; CHECK-NEXT: vmv.s.x v12, a0 @@ -5127,12 +5127,12 @@ ; CHECK-LABEL: vreduce_smax_v32i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: lui a0, 524288 -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v16, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vredmax.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -5147,15 +5147,15 @@ ; CHECK-LABEL: vreduce_smax_v64i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vmax.vv v8, v8, v16 ; CHECK-NEXT: lui a0, 524288 -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v16, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: 
vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vredmax.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -5169,7 +5169,7 @@ define i64 @vreduce_smax_v1i64(<1 x i64>* %x) { ; RV32-LABEL: vreduce_smax_v1i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsrl.vx v9, v8, a0 @@ -5179,7 +5179,7 @@ ; ; RV64-LABEL: vreduce_smax_v1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -5195,19 +5195,19 @@ ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw zero, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vredmax.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -5215,7 +5215,7 @@ ; ; RV64-LABEL: vreduce_smax_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: li a0, -1 ; RV64-NEXT: slli a0, a0, 63 @@ -5235,19 +5235,19 @@ ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw zero, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vredmax.vs v8, v8, v10 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -5255,7 +5255,7 @@ ; ; RV64-LABEL: vreduce_smax_v4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: li a0, -1 ; RV64-NEXT: slli a0, a0, 63 @@ -5275,19 +5275,19 @@ ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw zero, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vredmax.vs v8, v8, v12 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, 
e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -5295,7 +5295,7 @@ ; ; RV64-LABEL: vreduce_smax_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: li a0, -1 ; RV64-NEXT: slli a0, a0, 63 @@ -5315,19 +5315,19 @@ ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw zero, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vredmax.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -5335,7 +5335,7 @@ ; ; RV64-LABEL: vreduce_smax_v16i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: li a0, -1 ; RV64-NEXT: slli a0, a0, 63 @@ -5355,7 +5355,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: addi a0, a0, 128 ; RV32-NEXT: vle64.v v16, (a0) @@ -5364,13 +5364,13 @@ ; RV32-NEXT: sw zero, 8(sp) ; RV32-NEXT: vmax.vv v8, v8, v16 ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vredmax.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -5378,7 +5378,7 @@ ; ; RV64-LABEL: vreduce_smax_v32i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: addi a0, a0, 128 ; RV64-NEXT: vle64.v v16, (a0) @@ -5400,7 +5400,7 @@ ; RV32-LABEL: vreduce_smax_v64i64: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: addi a1, a0, 256 ; RV32-NEXT: vle64.v v16, (a1) @@ -5415,13 +5415,13 @@ ; RV32-NEXT: vmax.vv v8, v8, v16 ; RV32-NEXT: vmax.vv v8, v8, v24 ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vredmax.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; 
RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -5429,7 +5429,7 @@ ; ; RV64-LABEL: vreduce_smax_v64i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: addi a1, a0, 384 ; RV64-NEXT: vle64.v v16, (a1) @@ -5456,7 +5456,7 @@ define i8 @vreduce_umin_v1i8(<1 x i8>* %x) { ; CHECK-LABEL: vreduce_umin_v1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -5470,11 +5470,11 @@ define i8 @vreduce_umin_v2i8(<2 x i8>* %x) { ; CHECK-LABEL: vreduce_umin_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, -1 -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -5488,11 +5488,11 @@ define i8 @vreduce_umin_v4i8(<4 x i8>* %x) { ; CHECK-LABEL: vreduce_umin_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, -1 -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -5506,11 +5506,11 @@ define i8 @vreduce_umin_v8i8(<8 x i8>* %x) { ; CHECK-LABEL: vreduce_umin_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, -1 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -5524,11 +5524,11 @@ define i8 @vreduce_umin_v16i8(<16 x i8>* %x) { ; CHECK-LABEL: vreduce_umin_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, -1 -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -5543,11 +5543,11 @@ ; CHECK-LABEL: vreduce_umin_v32i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v10, -1 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vredminu.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -5562,11 +5562,11 @@ ; CHECK-LABEL: vreduce_umin_v64i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, 
a1, e8, m4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v12, -1 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vredminu.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -5581,11 +5581,11 @@ ; CHECK-LABEL: vreduce_umin_v128i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 128 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v16, -1 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vredminu.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -5600,14 +5600,14 @@ ; CHECK-LABEL: vreduce_umin_v256i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 128 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vminu.vv v8, v8, v16 -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v16, -1 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vredminu.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -5621,7 +5621,7 @@ define i16 @vreduce_umin_v1i16(<1 x i16>* %x) { ; CHECK-LABEL: vreduce_umin_v1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -5635,11 +5635,11 @@ define i16 @vreduce_umin_v2i16(<2 x i16>* %x) { ; CHECK-LABEL: vreduce_umin_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, -1 -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -5653,11 +5653,11 @@ define i16 @vreduce_umin_v4i16(<4 x i16>* %x) { ; CHECK-LABEL: vreduce_umin_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, -1 -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -5671,11 +5671,11 @@ define i16 @vreduce_umin_v8i16(<8 x i16>* %x) { ; CHECK-LABEL: vreduce_umin_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, -1 -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -5689,11 +5689,11 @@ define 
i16 @vreduce_umin_v16i16(<16 x i16>* %x) { ; CHECK-LABEL: vreduce_umin_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.i v10, -1 -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vredminu.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -5708,11 +5708,11 @@ ; CHECK-LABEL: vreduce_umin_v32i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.i v12, -1 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vredminu.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -5727,11 +5727,11 @@ ; CHECK-LABEL: vreduce_umin_v64i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.i v16, -1 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vredminu.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -5746,14 +5746,14 @@ ; CHECK-LABEL: vreduce_umin_v128i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vminu.vv v8, v8, v16 -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.i v16, -1 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vredminu.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -5767,7 +5767,7 @@ define i32 @vreduce_umin_v1i32(<1 x i32>* %x) { ; CHECK-LABEL: vreduce_umin_v1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -5781,11 +5781,11 @@ define i32 @vreduce_umin_v2i32(<2 x i32>* %x) { ; CHECK-LABEL: vreduce_umin_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, -1 -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -5799,11 +5799,11 @@ define i32 @vreduce_umin_v4i32(<4 x i32>* %x) { ; CHECK-LABEL: vreduce_umin_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, -1 -; CHECK-NEXT: vsetivli zero, 4, e32, 
m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -5817,11 +5817,11 @@ define i32 @vreduce_umin_v8i32(<8 x i32>* %x) { ; CHECK-LABEL: vreduce_umin_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v10, -1 -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vredminu.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -5835,11 +5835,11 @@ define i32 @vreduce_umin_v16i32(<16 x i32>* %x) { ; CHECK-LABEL: vreduce_umin_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v12, -1 -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vredminu.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -5854,11 +5854,11 @@ ; CHECK-LABEL: vreduce_umin_v32i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v16, -1 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vredminu.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -5873,14 +5873,14 @@ ; CHECK-LABEL: vreduce_umin_v64i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vminu.vv v8, v8, v16 -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v16, -1 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vredminu.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -5894,7 +5894,7 @@ define i64 @vreduce_umin_v1i64(<1 x i64>* %x) { ; RV32-LABEL: vreduce_umin_v1i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsrl.vx v9, v8, a0 @@ -5904,7 +5904,7 @@ ; ; RV64-LABEL: vreduce_umin_v1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -5918,26 +5918,26 @@ define i64 @vreduce_umin_v2i64(<2 x i64>* %x) { ; RV32-LABEL: vreduce_umin_v2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.v.i v9, -1 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vredminu.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 ; 
RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_umin_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.v.i v9, -1 -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vredminu.vs v8, v8, v9 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -5951,26 +5951,26 @@ define i64 @vreduce_umin_v4i64(<4 x i64>* %x) { ; RV32-LABEL: vreduce_umin_v4i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.v.i v10, -1 -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vredminu.vs v8, v8, v10 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_umin_v4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.v.i v10, -1 -; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-NEXT: vredminu.vs v8, v8, v10 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -5984,26 +5984,26 @@ define i64 @vreduce_umin_v8i64(<8 x i64>* %x) { ; RV32-LABEL: vreduce_umin_v8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.v.i v12, -1 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vredminu.vs v8, v8, v12 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_umin_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.v.i v12, -1 -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vredminu.vs v8, v8, v12 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -6017,26 +6017,26 @@ define i64 @vreduce_umin_v16i64(<16 x i64>* %x) { ; RV32-LABEL: vreduce_umin_v16i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.v.i v16, -1 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli 
zero, 16, e64, m8, ta, ma ; RV32-NEXT: vredminu.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_umin_v16i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.v.i v16, -1 -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vredminu.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -6050,32 +6050,32 @@ define i64 @vreduce_umin_v32i64(<32 x i64>* %x) { ; RV32-LABEL: vreduce_umin_v32i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: addi a0, a0, 128 ; RV32-NEXT: vle64.v v16, (a0) ; RV32-NEXT: vminu.vv v8, v8, v16 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.v.i v16, -1 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vredminu.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_umin_v32i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: addi a0, a0, 128 ; RV64-NEXT: vle64.v v16, (a0) ; RV64-NEXT: vminu.vv v8, v8, v16 -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.v.i v16, -1 -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vredminu.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -6089,7 +6089,7 @@ define i64 @vreduce_umin_v64i64(<64 x i64>* %x) nounwind { ; RV32-LABEL: vreduce_umin_v64i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: addi a1, a0, 384 ; RV32-NEXT: vle64.v v16, (a1) @@ -6100,20 +6100,20 @@ ; RV32-NEXT: vminu.vv v16, v24, v16 ; RV32-NEXT: vminu.vv v8, v8, v0 ; RV32-NEXT: vminu.vv v8, v8, v16 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.v.i v16, -1 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vredminu.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_umin_v64i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: addi a1, a0, 384 ; RV64-NEXT: vle64.v v16, (a1) @@ -6124,9 +6124,9 @@ ; RV64-NEXT: vminu.vv v16, v24, v16 ; RV64-NEXT: vminu.vv v8, v8, v0 ; RV64-NEXT: vminu.vv v8, v8, v16 -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: 
vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.v.i v16, -1 -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vredminu.vs v8, v8, v16 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -6140,7 +6140,7 @@ define i8 @vreduce_umax_v1i8(<1 x i8>* %x) { ; CHECK-LABEL: vreduce_umax_v1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -6154,7 +6154,7 @@ define i8 @vreduce_umax_v2i8(<2 x i8>* %x) { ; CHECK-LABEL: vreduce_umax_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 @@ -6170,7 +6170,7 @@ define i8 @vreduce_umax_v4i8(<4 x i8>* %x) { ; CHECK-LABEL: vreduce_umax_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 @@ -6186,7 +6186,7 @@ define i8 @vreduce_umax_v8i8(<8 x i8>* %x) { ; CHECK-LABEL: vreduce_umax_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 @@ -6202,7 +6202,7 @@ define i8 @vreduce_umax_v16i8(<16 x i8>* %x) { ; CHECK-LABEL: vreduce_umax_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 @@ -6219,11 +6219,11 @@ ; CHECK-LABEL: vreduce_umax_v32i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v10, zero -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vredmaxu.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -6238,11 +6238,11 @@ ; CHECK-LABEL: vreduce_umax_v64i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v12, zero -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vredmaxu.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -6257,11 +6257,11 @@ ; CHECK-LABEL: vreduce_umax_v128i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 128 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v16, zero -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vredmaxu.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -6276,14 +6276,14 @@ ; CHECK-LABEL: vreduce_umax_v256i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 128 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, 
ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vmaxu.vv v8, v8, v16 -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v16, zero -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vredmaxu.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -6297,7 +6297,7 @@ define i16 @vreduce_umax_v1i16(<1 x i16>* %x) { ; CHECK-LABEL: vreduce_umax_v1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -6311,7 +6311,7 @@ define i16 @vreduce_umax_v2i16(<2 x i16>* %x) { ; CHECK-LABEL: vreduce_umax_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 @@ -6327,7 +6327,7 @@ define i16 @vreduce_umax_v4i16(<4 x i16>* %x) { ; CHECK-LABEL: vreduce_umax_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 @@ -6343,7 +6343,7 @@ define i16 @vreduce_umax_v8i16(<8 x i16>* %x) { ; CHECK-LABEL: vreduce_umax_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 @@ -6359,7 +6359,7 @@ define i16 @vreduce_umax_v16i16(<16 x i16>* %x) { ; CHECK-LABEL: vreduce_umax_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.s.x v10, zero ; CHECK-NEXT: vredmaxu.vs v8, v8, v10 @@ -6376,11 +6376,11 @@ ; CHECK-LABEL: vreduce_umax_v32i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v12, zero -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vredmaxu.vs v8, v8, v12 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -6395,11 +6395,11 @@ ; CHECK-LABEL: vreduce_umax_v64i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v16, zero -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vredmaxu.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -6414,14 +6414,14 @@ ; CHECK-LABEL: vreduce_umax_v128i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vmaxu.vv v8, v8, v16 -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; 
CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v16, zero -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vredmaxu.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -6435,7 +6435,7 @@ define i32 @vreduce_umax_v1i32(<1 x i32>* %x) { ; CHECK-LABEL: vreduce_umax_v1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -6449,7 +6449,7 @@ define i32 @vreduce_umax_v2i32(<2 x i32>* %x) { ; CHECK-LABEL: vreduce_umax_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 @@ -6465,7 +6465,7 @@ define i32 @vreduce_umax_v4i32(<4 x i32>* %x) { ; CHECK-LABEL: vreduce_umax_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.s.x v9, zero ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 @@ -6481,7 +6481,7 @@ define i32 @vreduce_umax_v8i32(<8 x i32>* %x) { ; CHECK-LABEL: vreduce_umax_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.s.x v10, zero ; CHECK-NEXT: vredmaxu.vs v8, v8, v10 @@ -6497,7 +6497,7 @@ define i32 @vreduce_umax_v16i32(<16 x i32>* %x) { ; CHECK-LABEL: vreduce_umax_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.s.x v12, zero ; CHECK-NEXT: vredmaxu.vs v8, v8, v12 @@ -6514,11 +6514,11 @@ ; CHECK-LABEL: vreduce_umax_v32i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v16, zero -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vredmaxu.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -6533,14 +6533,14 @@ ; CHECK-LABEL: vreduce_umax_v64i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vmaxu.vv v8, v8, v16 -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v16, zero -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vredmaxu.vs v8, v8, v16 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -6554,7 +6554,7 @@ define i64 @vreduce_umax_v1i64(<1 x i64>* %x) { ; RV32-LABEL: vreduce_umax_v1i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsrl.vx v9, v8, a0 @@ -6564,7 +6564,7 @@ ; ; RV64-LABEL: vreduce_umax_v1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: 
vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -6578,20 +6578,20 @@ define i64 @vreduce_umax_v2i64(<2 x i64>* %x) { ; RV32-LABEL: vreduce_umax_v2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: vmv.s.x v9, zero ; RV32-NEXT: vredmaxu.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_umax_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.s.x v9, zero ; RV64-NEXT: vredmaxu.vs v8, v8, v9 @@ -6607,20 +6607,20 @@ define i64 @vreduce_umax_v4i64(<4 x i64>* %x) { ; RV32-LABEL: vreduce_umax_v4i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: vmv.s.x v10, zero ; RV32-NEXT: vredmaxu.vs v8, v8, v10 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_umax_v4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.s.x v10, zero ; RV64-NEXT: vredmaxu.vs v8, v8, v10 @@ -6636,20 +6636,20 @@ define i64 @vreduce_umax_v8i64(<8 x i64>* %x) { ; RV32-LABEL: vreduce_umax_v8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: vmv.s.x v12, zero ; RV32-NEXT: vredmaxu.vs v8, v8, v12 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_umax_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.s.x v12, zero ; RV64-NEXT: vredmaxu.vs v8, v8, v12 @@ -6665,20 +6665,20 @@ define i64 @vreduce_umax_v16i64(<16 x i64>* %x) { ; RV32-LABEL: vreduce_umax_v16i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: vmv.s.x v16, zero ; RV32-NEXT: vredmaxu.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_umax_v16i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.s.x v16, zero ; RV64-NEXT: vredmaxu.vs v8, v8, v16 @@ -6694,7 +6694,7 @@ define i64 @vreduce_umax_v32i64(<32 x i64>* %x) { ; RV32-LABEL: vreduce_umax_v32i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: addi a0, a0, 128 ; RV32-NEXT: vle64.v v16, (a0) @@ -6703,14 +6703,14 @@ ; RV32-NEXT: vredmaxu.vs v8, 
v8, v24 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_umax_v32i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: addi a0, a0, 128 ; RV64-NEXT: vle64.v v16, (a0) @@ -6729,7 +6729,7 @@ define i64 @vreduce_umax_v64i64(<64 x i64>* %x) nounwind { ; RV32-LABEL: vreduce_umax_v64i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: addi a1, a0, 384 ; RV32-NEXT: vle64.v v16, (a1) @@ -6744,14 +6744,14 @@ ; RV32-NEXT: vredmaxu.vs v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_umax_v64i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: addi a1, a0, 384 ; RV64-NEXT: vle64.v v16, (a1) @@ -6776,7 +6776,7 @@ define i8 @vreduce_mul_v1i8(<1 x i8>* %x) { ; CHECK-LABEL: vreduce_mul_v1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -6790,7 +6790,7 @@ define i8 @vreduce_mul_v2i8(<2 x i8>* %x) { ; CHECK-LABEL: vreduce_mul_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: lb a0, 1(a0) ; CHECK-NEXT: vmul.vx v8, v8, a0 @@ -6806,7 +6806,7 @@ define i8 @vreduce_mul_v4i8(<4 x i8>* %x) { ; CHECK-LABEL: vreduce_mul_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vslidedown.vi v9, v8, 2 ; CHECK-NEXT: vmul.vv v8, v8, v9 @@ -6824,7 +6824,7 @@ define i8 @vreduce_mul_v8i8(<8 x i8>* %x) { ; CHECK-LABEL: vreduce_mul_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vslidedown.vi v9, v8, 4 ; CHECK-NEXT: vmul.vv v8, v8, v9 @@ -6844,7 +6844,7 @@ define i8 @vreduce_mul_v16i8(<16 x i8>* %x) { ; CHECK-LABEL: vreduce_mul_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vslidedown.vi v9, v8, 8 ; CHECK-NEXT: vmul.vv v8, v8, v9 @@ -6867,7 +6867,7 @@ ; CHECK-LABEL: vreduce_mul_v32i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vslidedown.vi v10, v8, 16 ; CHECK-NEXT: vmul.vv v8, v8, v10 @@ -6892,7 +6892,7 @@ ; CHECK-LABEL: vreduce_mul_v64i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, 32 ; CHECK-NEXT: vslidedown.vx v12, v8, a0 @@ -6920,7 +6920,7 @@ ; CHECK-LABEL: vreduce_mul_v128i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 
128 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: li a0, 64 ; CHECK-NEXT: vslidedown.vx v16, v8, a0 @@ -6951,7 +6951,7 @@ ; CHECK-LABEL: vreduce_mul_v256i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 128 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle8.v v16, (a0) @@ -6984,7 +6984,7 @@ define i16 @vreduce_mul_v1i16(<1 x i16>* %x) { ; CHECK-LABEL: vreduce_mul_v1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -6998,7 +6998,7 @@ define i16 @vreduce_mul_v2i16(<2 x i16>* %x) { ; CHECK-LABEL: vreduce_mul_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: lh a0, 2(a0) ; CHECK-NEXT: vmul.vx v8, v8, a0 @@ -7014,7 +7014,7 @@ define i16 @vreduce_mul_v4i16(<4 x i16>* %x) { ; CHECK-LABEL: vreduce_mul_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vslidedown.vi v9, v8, 2 ; CHECK-NEXT: vmul.vv v8, v8, v9 @@ -7032,7 +7032,7 @@ define i16 @vreduce_mul_v8i16(<8 x i16>* %x) { ; CHECK-LABEL: vreduce_mul_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vslidedown.vi v9, v8, 4 ; CHECK-NEXT: vmul.vv v8, v8, v9 @@ -7052,7 +7052,7 @@ define i16 @vreduce_mul_v16i16(<16 x i16>* %x) { ; CHECK-LABEL: vreduce_mul_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vslidedown.vi v10, v8, 8 ; CHECK-NEXT: vmul.vv v8, v8, v10 @@ -7075,7 +7075,7 @@ ; CHECK-LABEL: vreduce_mul_v32i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vslidedown.vi v12, v8, 16 ; CHECK-NEXT: vmul.vv v8, v8, v12 @@ -7100,7 +7100,7 @@ ; CHECK-LABEL: vreduce_mul_v64i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: li a0, 32 ; CHECK-NEXT: vslidedown.vx v16, v8, a0 @@ -7128,7 +7128,7 @@ ; CHECK-LABEL: vreduce_mul_v128i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle16.v v16, (a0) @@ -7158,7 +7158,7 @@ define i32 @vreduce_mul_v1i32(<1 x i32>* %x) { ; CHECK-LABEL: vreduce_mul_v1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -7172,7 +7172,7 @@ define i32 @vreduce_mul_v2i32(<2 x i32>* %x) { ; CHECK-LABEL: vreduce_mul_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: lw a0, 4(a0) ; CHECK-NEXT: vmul.vx v8, 
v8, a0 @@ -7188,7 +7188,7 @@ define i32 @vreduce_mul_v4i32(<4 x i32>* %x) { ; CHECK-LABEL: vreduce_mul_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vslidedown.vi v9, v8, 2 ; CHECK-NEXT: vmul.vv v8, v8, v9 @@ -7206,7 +7206,7 @@ define i32 @vreduce_mul_v8i32(<8 x i32>* %x) { ; CHECK-LABEL: vreduce_mul_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vslidedown.vi v10, v8, 4 ; CHECK-NEXT: vmul.vv v8, v8, v10 @@ -7226,7 +7226,7 @@ define i32 @vreduce_mul_v16i32(<16 x i32>* %x) { ; CHECK-LABEL: vreduce_mul_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vslidedown.vi v12, v8, 8 ; CHECK-NEXT: vmul.vv v8, v8, v12 @@ -7249,7 +7249,7 @@ ; CHECK-LABEL: vreduce_mul_v32i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vslidedown.vi v16, v8, 16 ; CHECK-NEXT: vmul.vv v8, v8, v16 @@ -7274,7 +7274,7 @@ ; CHECK-LABEL: vreduce_mul_v64i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: addi a0, a0, 128 ; CHECK-NEXT: vle32.v v16, (a0) @@ -7301,7 +7301,7 @@ define i64 @vreduce_mul_v1i64(<1 x i64>* %x) { ; RV32-LABEL: vreduce_mul_v1i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: li a0, 32 ; RV32-NEXT: vsrl.vx v9, v8, a0 @@ -7311,7 +7311,7 @@ ; ; RV64-LABEL: vreduce_mul_v1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -7325,21 +7325,21 @@ define i64 @vreduce_mul_v2i64(<2 x i64>* %x) { ; RV32-LABEL: vreduce_mul_v2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: addi a0, a0, 8 ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vmul.vv v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_mul_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: ld a0, 8(a0) ; RV64-NEXT: vmul.vx v8, v8, a0 @@ -7355,7 +7355,7 @@ define i64 @vreduce_mul_v4i64(<4 x i64>* %x) { ; RV32-LABEL: vreduce_mul_v4i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: vslidedown.vi v10, v8, 2 ; RV32-NEXT: vmul.vv v8, v8, v10 @@ -7363,14 +7363,14 @@ ; RV32-NEXT: vmul.vv v8, v8, v10 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_mul_v4i64: ; 
RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vslidedown.vi v10, v8, 2 ; RV64-NEXT: vmul.vv v8, v8, v10 @@ -7388,7 +7388,7 @@ define i64 @vreduce_mul_v8i64(<8 x i64>* %x) { ; RV32-LABEL: vreduce_mul_v8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: vslidedown.vi v12, v8, 4 ; RV32-NEXT: vmul.vv v8, v8, v12 @@ -7398,14 +7398,14 @@ ; RV32-NEXT: vmul.vv v8, v8, v12 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m4, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_mul_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vslidedown.vi v12, v8, 4 ; RV64-NEXT: vmul.vv v8, v8, v12 @@ -7425,7 +7425,7 @@ define i64 @vreduce_mul_v16i64(<16 x i64>* %x) { ; RV32-LABEL: vreduce_mul_v16i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: vslidedown.vi v16, v8, 8 ; RV32-NEXT: vmul.vv v8, v8, v16 @@ -7437,14 +7437,14 @@ ; RV32-NEXT: vmul.vv v8, v8, v16 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m8, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_mul_v16i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: vslidedown.vi v16, v8, 8 ; RV64-NEXT: vmul.vv v8, v8, v16 @@ -7466,7 +7466,7 @@ define i64 @vreduce_mul_v32i64(<32 x i64>* %x) { ; RV32-LABEL: vreduce_mul_v32i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: addi a0, a0, 128 ; RV32-NEXT: vle64.v v16, (a0) @@ -7479,16 +7479,16 @@ ; RV32-NEXT: vmul.vv v8, v8, v16 ; RV32-NEXT: vrgather.vi v16, v8, 1 ; RV32-NEXT: vmul.vv v8, v8, v16 -; RV32-NEXT: vsetivli zero, 0, e32, m8, ta, mu +; RV32-NEXT: vsetivli zero, 0, e32, m8, ta, ma ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: vsetivli zero, 1, e32, m8, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, m8, ta, ma ; RV32-NEXT: vslidedown.vi v8, v8, 1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_mul_v32i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: addi a0, a0, 128 ; RV64-NEXT: vle64.v v16, (a0) @@ -7513,7 +7513,7 @@ define i64 @vreduce_mul_v64i64(<64 x i64>* %x) nounwind { ; RV32-LABEL: vreduce_mul_v64i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vle64.v v8, (a0) ; RV32-NEXT: addi a1, a0, 384 ; RV32-NEXT: vle64.v v16, (a1) @@ -7532,16 +7532,16 @@ ; RV32-NEXT: vmul.vv v8, v8, v16 ; RV32-NEXT: vrgather.vi v16, v8, 1 ; RV32-NEXT: vmul.vv v8, v8, v16 -; RV32-NEXT: vsetivli zero, 0, e32, m8, ta, mu +; RV32-NEXT: vsetivli zero, 0, e32, m8, ta, ma ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: vsetivli zero, 1, e32, m8, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, m8, 
ta, ma ; RV32-NEXT: vslidedown.vi v8, v8, 1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_mul_v64i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vle64.v v8, (a0) ; RV64-NEXT: addi a1, a0, 384 ; RV64-NEXT: vle64.v v16, (a1) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll @@ -9,7 +9,7 @@ define signext i1 @vpreduce_and_v1i1(i1 signext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_v1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -27,7 +27,7 @@ ; CHECK-LABEL: vpreduce_or_v1i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -45,7 +45,7 @@ ; CHECK-LABEL: vpreduce_xor_v1i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: xor a0, a1, a0 @@ -61,7 +61,7 @@ define signext i1 @vpreduce_and_v2i1(i1 signext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -79,7 +79,7 @@ ; CHECK-LABEL: vpreduce_or_v2i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -97,7 +97,7 @@ ; CHECK-LABEL: vpreduce_xor_v2i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: xor a0, a1, a0 @@ -113,7 +113,7 @@ define signext i1 @vpreduce_and_v4i1(i1 signext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -131,7 +131,7 @@ ; CHECK-LABEL: vpreduce_or_v4i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -149,7 +149,7 @@ ; CHECK-LABEL: vpreduce_xor_v4i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: xor a0, a1, a0 @@ -165,7 +165,7 @@ define signext i1 @vpreduce_and_v8i1(i1 signext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vpreduce_and_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -183,7 +183,7 @@ ; CHECK-LABEL: vpreduce_or_v8i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -201,7 +201,7 @@ ; CHECK-LABEL: vpreduce_xor_v8i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: xor a0, a1, a0 @@ -217,7 +217,7 @@ define signext i1 @vpreduce_and_v10i1(i1 signext %s, <10 x i1> %v, <10 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_v10i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -234,7 +234,7 @@ define signext i1 @vpreduce_and_v16i1(i1 signext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_v16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -258,7 +258,7 @@ ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a3, a2 ; CHECK-NEXT: .LBB14_2: -; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma ; CHECK-NEXT: vmnot.m v8, v8 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vcpop.m a2, v8, v0.t @@ -268,7 +268,7 @@ ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: li a1, 128 ; CHECK-NEXT: .LBB14_4: -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmnot.m v8, v11 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vcpop.m a1, v8, v0.t @@ -287,7 +287,7 @@ ; CHECK-LABEL: vpreduce_or_v16i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -305,7 +305,7 @@ ; CHECK-LABEL: vpreduce_xor_v16i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: xor a0, a1, a0 @@ -322,7 +322,7 @@ ; CHECK-LABEL: vpreduce_add_v1i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: xor a0, a1, a0 @@ -339,7 +339,7 @@ ; CHECK-LABEL: vpreduce_add_v2i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: xor a0, a1, a0 @@ -356,7 +356,7 @@ ; CHECK-LABEL: vpreduce_add_v4i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: xor a0, a1, a0 @@ 
-373,7 +373,7 @@ ; CHECK-LABEL: vpreduce_add_v8i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: xor a0, a1, a0 @@ -390,7 +390,7 @@ ; CHECK-LABEL: vpreduce_add_v16i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: xor a0, a1, a0 @@ -406,7 +406,7 @@ define signext i1 @vpreduce_smax_nxv1i1(i1 signext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -423,7 +423,7 @@ define signext i1 @vpreduce_smax_nxv2i1(i1 signext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -440,7 +440,7 @@ define signext i1 @vpreduce_smax_nxv4i1(i1 signext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -457,7 +457,7 @@ define signext i1 @vpreduce_smax_nxv8i1(i1 signext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -474,7 +474,7 @@ define signext i1 @vpreduce_smax_nxv16i1(i1 signext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -491,7 +491,7 @@ define signext i1 @vpreduce_smax_nxv32i1(i1 signext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -508,7 +508,7 @@ define signext i1 @vpreduce_smax_nxv64i1(i1 signext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -526,7 +526,7 @@ ; CHECK-LABEL: vpreduce_smin_nxv1i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -544,7 +544,7 @@ ; CHECK-LABEL: vpreduce_smin_nxv2i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; 
CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -562,7 +562,7 @@ ; CHECK-LABEL: vpreduce_smin_nxv4i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -580,7 +580,7 @@ ; CHECK-LABEL: vpreduce_smin_nxv8i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -598,7 +598,7 @@ ; CHECK-LABEL: vpreduce_smin_nxv16i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -616,7 +616,7 @@ ; CHECK-LABEL: vpreduce_smin_nxv32i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -634,7 +634,7 @@ ; CHECK-LABEL: vpreduce_smin_nxv64i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -652,7 +652,7 @@ ; CHECK-LABEL: vpreduce_umax_nxv1i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -670,7 +670,7 @@ ; CHECK-LABEL: vpreduce_umax_nxv2i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -688,7 +688,7 @@ ; CHECK-LABEL: vpreduce_umax_nxv4i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -706,7 +706,7 @@ ; CHECK-LABEL: vpreduce_umax_nxv8i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -724,7 +724,7 @@ ; CHECK-LABEL: vpreduce_umax_nxv16i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -742,7 +742,7 @@ ; CHECK-LABEL: vpreduce_umax_nxv32i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -760,7 +760,7 @@ ; CHECK-LABEL: vpreduce_umax_nxv64i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; 
CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -777,7 +777,7 @@ define signext i1 @vpreduce_umin_nxv1i1(i1 signext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -794,7 +794,7 @@ define signext i1 @vpreduce_umin_nxv2i1(i1 signext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -811,7 +811,7 @@ define signext i1 @vpreduce_umin_nxv4i1(i1 signext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -828,7 +828,7 @@ define signext i1 @vpreduce_umin_nxv8i1(i1 signext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -845,7 +845,7 @@ define signext i1 @vpreduce_umin_nxv16i1(i1 signext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -862,7 +862,7 @@ define signext i1 @vpreduce_umin_nxv32i1(i1 signext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -879,7 +879,7 @@ define signext i1 @vpreduce_umin_nxv64i1(i1 signext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -896,7 +896,7 @@ define i1 @vpreduce_mul_v1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_v1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -912,7 +912,7 @@ define signext i1 @vpreduce_mul_v2i1(i1 signext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -929,7 +929,7 @@ define signext i1 @vpreduce_mul_v4i1(i1 signext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vpreduce_mul_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -946,7 +946,7 @@ define signext i1 @vpreduce_mul_v8i1(i1 signext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -963,7 +963,7 @@ define signext i1 @vpreduce_mul_v16i1(i1 signext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_v16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -980,7 +980,7 @@ define signext i1 @vpreduce_mul_v32i1(i1 signext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_v32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -997,7 +997,7 @@ define signext i1 @vpreduce_mul_v64i1(i1 signext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_v64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll @@ -29,7 +29,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI1_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI1_0)(a1) -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfabs.v v9, v8, v0.t @@ -71,7 +71,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI3_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI3_0)(a1) -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfabs.v v9, v8, v0.t @@ -113,7 +113,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI5_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI5_0)(a1) -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfabs.v v9, v8, v0.t @@ -157,7 +157,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI7_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI7_0)(a1) -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmset.m v10 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmv1r.v v0, v10 @@ -201,7 +201,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI9_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI9_0)(a1) -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfabs.v v9, v8, v0.t @@ -243,7 +243,7 @@ ; CHECK: # 
%bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI11_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI11_0)(a1) -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfabs.v v9, v8, v0.t @@ -287,7 +287,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI13_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI13_0)(a1) -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmset.m v10 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmv1r.v v0, v10 @@ -333,7 +333,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI15_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI15_0)(a1) -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmset.m v12 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmv1r.v v0, v12 @@ -377,7 +377,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI17_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI17_0)(a1) -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfabs.v v9, v8, v0.t @@ -421,7 +421,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI19_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI19_0)(a1) -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmset.m v10 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmv1r.v v0, v10 @@ -467,7 +467,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI21_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI21_0)(a1) -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmset.m v12 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmv1r.v v0, v12 @@ -513,7 +513,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI23_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI23_0)(a1) -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmset.m v16 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v16 @@ -559,7 +559,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI25_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI25_0)(a1) -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmset.m v16 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v16 @@ -585,7 +585,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v1, v0 ; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: addi a2, a0, -16 ; CHECK-NEXT: vslidedown.vi v2, v0, 2 ; CHECK-NEXT: bltu a0, a2, .LBB26_2 @@ -645,7 +645,7 @@ ; CHECK-LABEL: vp_round_v32f64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: addi a2, a0, -16 ; CHECK-NEXT: vmset.m v1 ; CHECK-NEXT: bltu a0, a2, .LBB27_2 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll @@ -29,7 +29,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI1_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI1_0)(a1) -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: 
vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vfabs.v v9, v8, v0.t @@ -71,7 +71,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI3_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI3_0)(a1) -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfabs.v v9, v8, v0.t @@ -113,7 +113,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI5_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI5_0)(a1) -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfabs.v v9, v8, v0.t @@ -157,7 +157,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI7_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI7_0)(a1) -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmset.m v10 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vmv1r.v v0, v10 @@ -201,7 +201,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI9_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI9_0)(a1) -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vfabs.v v9, v8, v0.t @@ -243,7 +243,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI11_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI11_0)(a1) -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfabs.v v9, v8, v0.t @@ -287,7 +287,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI13_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI13_0)(a1) -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmset.m v10 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vmv1r.v v0, v10 @@ -333,7 +333,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI15_0) ; CHECK-NEXT: flw ft0, %lo(.LCPI15_0)(a1) -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmset.m v12 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vmv1r.v v0, v12 @@ -377,7 +377,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI17_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI17_0)(a1) -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vfabs.v v9, v8, v0.t @@ -421,7 +421,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI19_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI19_0)(a1) -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmset.m v10 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vmv1r.v v0, v10 @@ -467,7 +467,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI21_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI21_0)(a1) -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmset.m v12 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vmv1r.v v0, v12 @@ -513,7 +513,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI23_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI23_0)(a1) -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, 
ta, ma ; CHECK-NEXT: vmset.m v16 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v16 @@ -559,7 +559,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a1, %hi(.LCPI25_0) ; CHECK-NEXT: fld ft0, %lo(.LCPI25_0)(a1) -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmset.m v16 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v16 @@ -585,7 +585,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v1, v0 ; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: addi a2, a0, -16 ; CHECK-NEXT: vslidedown.vi v2, v0, 2 ; CHECK-NEXT: bltu a0, a2, .LBB26_2 @@ -645,7 +645,7 @@ ; CHECK-LABEL: vp_roundeven_v32f64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: addi a2, a0, -16 ; CHECK-NEXT: vmset.m v1 ; CHECK-NEXT: bltu a0, a2, .LBB27_2 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-fp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-fp.ll @@ -7,10 +7,10 @@ define <2 x half> @select_v2f16(i1 zeroext %c, <2 x half> %a, <2 x half> %b) { ; CHECK-LABEL: select_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, <2 x half> %a, <2 x half> %b @@ -21,10 +21,10 @@ ; CHECK-LABEL: selectcc_v2f16: ; CHECK: # %bb.0: ; CHECK-NEXT: feq.h a0, fa0, fa1 -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %cmp = fcmp oeq half %a, %b @@ -35,10 +35,10 @@ define <4 x half> @select_v4f16(i1 zeroext %c, <4 x half> %a, <4 x half> %b) { ; CHECK-LABEL: select_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, <4 x half> %a, <4 x half> %b @@ -49,10 +49,10 @@ ; CHECK-LABEL: selectcc_v4f16: ; CHECK: # %bb.0: ; CHECK-NEXT: feq.h a0, fa0, fa1 -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %cmp = fcmp oeq half %a, %b @@ -63,10 +63,10 @@ define <8 x half> @select_v8f16(i1 zeroext %c, <8 x half> %a, <8 x half> %b) { ; CHECK-LABEL: select_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, 
e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, <8 x half> %a, <8 x half> %b @@ -77,10 +77,10 @@ ; CHECK-LABEL: selectcc_v8f16: ; CHECK: # %bb.0: ; CHECK-NEXT: feq.h a0, fa0, fa1 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %cmp = fcmp oeq half %a, %b @@ -91,10 +91,10 @@ define <16 x half> @select_v16f16(i1 zeroext %c, <16 x half> %a, <16 x half> %b) { ; CHECK-LABEL: select_v16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsne.vi v0, v12, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, <16 x half> %a, <16 x half> %b @@ -105,10 +105,10 @@ ; CHECK-LABEL: selectcc_v16f16: ; CHECK: # %bb.0: ; CHECK-NEXT: feq.h a0, fa0, fa1 -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsne.vi v0, v12, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %cmp = fcmp oeq half %a, %b @@ -119,10 +119,10 @@ define <2 x float> @select_v2f32(i1 zeroext %c, <2 x float> %a, <2 x float> %b) { ; CHECK-LABEL: select_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, <2 x float> %a, <2 x float> %b @@ -133,10 +133,10 @@ ; CHECK-LABEL: selectcc_v2f32: ; CHECK: # %bb.0: ; CHECK-NEXT: feq.s a0, fa0, fa1 -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %cmp = fcmp oeq float %a, %b @@ -147,10 +147,10 @@ define <4 x float> @select_v4f32(i1 zeroext %c, <4 x float> %a, <4 x float> %b) { ; CHECK-LABEL: select_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, <4 x float> %a, <4 x float> %b @@ -161,10 +161,10 @@ ; CHECK-LABEL: selectcc_v4f32: ; CHECK: # %bb.0: ; CHECK-NEXT: feq.s a0, fa0, fa1 -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %cmp = fcmp oeq 
float %a, %b @@ -175,10 +175,10 @@ define <8 x float> @select_v8f32(i1 zeroext %c, <8 x float> %a, <8 x float> %b) { ; CHECK-LABEL: select_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsne.vi v0, v12, 0 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, <8 x float> %a, <8 x float> %b @@ -189,10 +189,10 @@ ; CHECK-LABEL: selectcc_v8f32: ; CHECK: # %bb.0: ; CHECK-NEXT: feq.s a0, fa0, fa1 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsne.vi v0, v12, 0 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %cmp = fcmp oeq float %a, %b @@ -203,10 +203,10 @@ define <16 x float> @select_v16f32(i1 zeroext %c, <16 x float> %a, <16 x float> %b) { ; CHECK-LABEL: select_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vmsne.vi v0, v16, 0 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, <16 x float> %a, <16 x float> %b @@ -217,10 +217,10 @@ ; CHECK-LABEL: selectcc_v16f32: ; CHECK: # %bb.0: ; CHECK-NEXT: feq.s a0, fa0, fa1 -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vmsne.vi v0, v16, 0 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %cmp = fcmp oeq float %a, %b @@ -231,10 +231,10 @@ define <2 x double> @select_v2f64(i1 zeroext %c, <2 x double> %a, <2 x double> %b) { ; CHECK-LABEL: select_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, <2 x double> %a, <2 x double> %b @@ -245,10 +245,10 @@ ; CHECK-LABEL: selectcc_v2f64: ; CHECK: # %bb.0: ; CHECK-NEXT: feq.d a0, fa0, fa1 -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %cmp = fcmp oeq double %a, %b @@ -259,10 +259,10 @@ define <4 x double> @select_v4f64(i1 zeroext %c, <4 x double> %a, <4 x double> %b) { ; CHECK-LABEL: select_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsne.vi v0, v12, 0 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, <4 x double> %a, <4 x double> %b @@ -273,10 +273,10 @@ ; CHECK-LABEL: selectcc_v4f64: ; 
CHECK: # %bb.0: ; CHECK-NEXT: feq.d a0, fa0, fa1 -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsne.vi v0, v12, 0 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %cmp = fcmp oeq double %a, %b @@ -287,10 +287,10 @@ define <8 x double> @select_v8f64(i1 zeroext %c, <8 x double> %a, <8 x double> %b) { ; CHECK-LABEL: select_v8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vmsne.vi v0, v16, 0 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, <8 x double> %a, <8 x double> %b @@ -301,10 +301,10 @@ ; CHECK-LABEL: selectcc_v8f64: ; CHECK: # %bb.0: ; CHECK-NEXT: feq.d a0, fa0, fa1 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vmsne.vi v0, v16, 0 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %cmp = fcmp oeq double %a, %b @@ -315,10 +315,10 @@ define <16 x double> @select_v16f64(i1 zeroext %c, <16 x double> %a, <16 x double> %b) { ; CHECK-LABEL: select_v16f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v24, a0 ; CHECK-NEXT: vmsne.vi v0, v24, 0 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, <16 x double> %a, <16 x double> %b @@ -329,10 +329,10 @@ ; CHECK-LABEL: selectcc_v16f64: ; CHECK: # %bb.0: ; CHECK-NEXT: feq.d a0, fa0, fa1 -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v24, a0 ; CHECK-NEXT: vmsne.vi v0, v24, 0 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 ; CHECK-NEXT: ret %cmp = fcmp oeq double %a, %b diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-int.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-int.ll @@ -7,7 +7,7 @@ define <1 x i1> @select_v1i1(i1 zeroext %c, <1 x i1> %a, <1 x i1> %b) { ; CHECK-LABEL: select_v1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsne.vi v9, v9, 0 ; CHECK-NEXT: vmandn.mm v8, v8, v9 @@ -23,7 +23,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: andi a0, a0, 1 -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsne.vi v9, v9, 0 ; CHECK-NEXT: vmandn.mm v8, v8, v9 @@ -38,7 +38,7 @@ define <2 x i1> @select_v2i1(i1 zeroext %c, <2 x i1> %a, <2 x i1> %b) { ; CHECK-LABEL: select_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsne.vi v9, v9, 0 ; 
CHECK-NEXT: vmandn.mm v8, v8, v9 @@ -54,7 +54,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: andi a0, a0, 1 -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsne.vi v9, v9, 0 ; CHECK-NEXT: vmandn.mm v8, v8, v9 @@ -69,7 +69,7 @@ define <4 x i1> @select_v4i1(i1 zeroext %c, <4 x i1> %a, <4 x i1> %b) { ; CHECK-LABEL: select_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsne.vi v9, v9, 0 ; CHECK-NEXT: vmandn.mm v8, v8, v9 @@ -85,7 +85,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: andi a0, a0, 1 -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsne.vi v9, v9, 0 ; CHECK-NEXT: vmandn.mm v8, v8, v9 @@ -100,7 +100,7 @@ define <8 x i1> @select_v8i1(i1 zeroext %c, <8 x i1> %a, <8 x i1> %b) { ; CHECK-LABEL: select_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsne.vi v9, v9, 0 ; CHECK-NEXT: vmandn.mm v8, v8, v9 @@ -116,7 +116,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: andi a0, a0, 1 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsne.vi v9, v9, 0 ; CHECK-NEXT: vmandn.mm v8, v8, v9 @@ -131,7 +131,7 @@ define <16 x i1> @select_v16i1(i1 zeroext %c, <16 x i1> %a, <16 x i1> %b) { ; CHECK-LABEL: select_v16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsne.vi v9, v9, 0 ; CHECK-NEXT: vmandn.mm v8, v8, v9 @@ -147,7 +147,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: andi a0, a0, 1 -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsne.vi v9, v9, 0 ; CHECK-NEXT: vmandn.mm v8, v8, v9 @@ -162,7 +162,7 @@ define <2 x i8> @select_v2i8(i1 zeroext %c, <2 x i8> %a, <2 x i8> %b) { ; CHECK-LABEL: select_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 @@ -176,7 +176,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: snez a0, a0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 @@ -189,7 +189,7 @@ define <4 x i8> @select_v4i8(i1 zeroext %c, <4 x i8> %a, <4 x i8> %b) { ; CHECK-LABEL: select_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 @@ -203,7 +203,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: snez a0, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 @@ -216,7 +216,7 @@ define <8 x i8> @select_v8i8(i1 zeroext %c, <8 x i8> %a, <8 x i8> %b) 
{ ; CHECK-LABEL: select_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 @@ -230,7 +230,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: snez a0, a0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 @@ -243,7 +243,7 @@ define <16 x i8> @select_v16i8(i1 zeroext %c, <16 x i8> %a, <16 x i8> %b) { ; CHECK-LABEL: select_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 @@ -257,7 +257,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: snez a0, a0 -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 @@ -270,10 +270,10 @@ define <2 x i16> @select_v2i16(i1 zeroext %c, <2 x i16> %a, <2 x i16> %b) { ; CHECK-LABEL: select_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, <2 x i16> %a, <2 x i16> %b @@ -285,10 +285,10 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: snez a0, a0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %cmp = icmp ne i16 %a, %b @@ -299,10 +299,10 @@ define <4 x i16> @select_v4i16(i1 zeroext %c, <4 x i16> %a, <4 x i16> %b) { ; CHECK-LABEL: select_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, <4 x i16> %a, <4 x i16> %b @@ -314,10 +314,10 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: snez a0, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %cmp = icmp ne i16 %a, %b @@ -328,10 +328,10 @@ define <8 x i16> @select_v8i16(i1 zeroext %c, <8 x i16> %a, <8 x i16> %b) { ; CHECK-LABEL: select_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 
; CHECK-NEXT: ret %v = select i1 %c, <8 x i16> %a, <8 x i16> %b @@ -343,10 +343,10 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: snez a0, a0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %cmp = icmp ne i16 %a, %b @@ -357,10 +357,10 @@ define <16 x i16> @select_v16i16(i1 zeroext %c, <16 x i16> %a, <16 x i16> %b) { ; CHECK-LABEL: select_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsne.vi v0, v12, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, <16 x i16> %a, <16 x i16> %b @@ -372,10 +372,10 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: snez a0, a0 -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsne.vi v0, v12, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %cmp = icmp ne i16 %a, %b @@ -386,10 +386,10 @@ define <2 x i32> @select_v2i32(i1 zeroext %c, <2 x i32> %a, <2 x i32> %b) { ; CHECK-LABEL: select_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, <2 x i32> %a, <2 x i32> %b @@ -401,10 +401,10 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: snez a0, a0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %cmp = icmp ne i32 %a, %b @@ -415,10 +415,10 @@ define <4 x i32> @select_v4i32(i1 zeroext %c, <4 x i32> %a, <4 x i32> %b) { ; CHECK-LABEL: select_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, <4 x i32> %a, <4 x i32> %b @@ -430,10 +430,10 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: snez a0, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %cmp = icmp ne i32 %a, %b @@ -444,10 +444,10 @@ define <8 x i32> @select_v8i32(i1 zeroext %c, <8 x i32> %a, <8 x i32> %b) { ; CHECK-LABEL: select_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: 
vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsne.vi v0, v12, 0 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, <8 x i32> %a, <8 x i32> %b @@ -459,10 +459,10 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: snez a0, a0 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsne.vi v0, v12, 0 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %cmp = icmp ne i32 %a, %b @@ -473,10 +473,10 @@ define <16 x i32> @select_v16i32(i1 zeroext %c, <16 x i32> %a, <16 x i32> %b) { ; CHECK-LABEL: select_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vmsne.vi v0, v16, 0 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, <16 x i32> %a, <16 x i32> %b @@ -488,10 +488,10 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: snez a0, a0 -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vmsne.vi v0, v16, 0 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %cmp = icmp ne i32 %a, %b @@ -502,10 +502,10 @@ define <2 x i64> @select_v2i64(i1 zeroext %c, <2 x i64> %a, <2 x i64> %b) { ; CHECK-LABEL: select_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, <2 x i64> %a, <2 x i64> %b @@ -519,10 +519,10 @@ ; RV32-NEXT: xor a0, a0, a2 ; RV32-NEXT: or a0, a0, a1 ; RV32-NEXT: snez a0, a0 -; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; RV32-NEXT: vmv.v.x v10, a0 ; RV32-NEXT: vmsne.vi v0, v10, 0 -; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV32-NEXT: vmerge.vvm v8, v9, v8, v0 ; RV32-NEXT: ret ; @@ -530,10 +530,10 @@ ; RV64: # %bb.0: ; RV64-NEXT: xor a0, a0, a1 ; RV64-NEXT: snez a0, a0 -; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; RV64-NEXT: vmv.v.x v10, a0 ; RV64-NEXT: vmsne.vi v0, v10, 0 -; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV64-NEXT: vmerge.vvm v8, v9, v8, v0 ; RV64-NEXT: ret %cmp = icmp ne i64 %a, %b @@ -544,10 +544,10 @@ define <4 x i64> @select_v4i64(i1 zeroext %c, <4 x i64> %a, <4 x i64> %b) { ; CHECK-LABEL: select_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsne.vi v0, v12, 0 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; CHECK-NEXT: 
vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, <4 x i64> %a, <4 x i64> %b @@ -561,10 +561,10 @@ ; RV32-NEXT: xor a0, a0, a2 ; RV32-NEXT: or a0, a0, a1 ; RV32-NEXT: snez a0, a0 -; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; RV32-NEXT: vmv.v.x v12, a0 ; RV32-NEXT: vmsne.vi v0, v12, 0 -; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; RV32-NEXT: vmerge.vvm v8, v10, v8, v0 ; RV32-NEXT: ret ; @@ -572,10 +572,10 @@ ; RV64: # %bb.0: ; RV64-NEXT: xor a0, a0, a1 ; RV64-NEXT: snez a0, a0 -; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; RV64-NEXT: vmv.v.x v12, a0 ; RV64-NEXT: vmsne.vi v0, v12, 0 -; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; RV64-NEXT: vmerge.vvm v8, v10, v8, v0 ; RV64-NEXT: ret %cmp = icmp ne i64 %a, %b @@ -586,10 +586,10 @@ define <8 x i64> @select_v8i64(i1 zeroext %c, <8 x i64> %a, <8 x i64> %b) { ; CHECK-LABEL: select_v8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vmsne.vi v0, v16, 0 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, <8 x i64> %a, <8 x i64> %b @@ -603,10 +603,10 @@ ; RV32-NEXT: xor a0, a0, a2 ; RV32-NEXT: or a0, a0, a1 ; RV32-NEXT: snez a0, a0 -; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV32-NEXT: vmv.v.x v16, a0 ; RV32-NEXT: vmsne.vi v0, v16, 0 -; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; RV32-NEXT: vmerge.vvm v8, v12, v8, v0 ; RV32-NEXT: ret ; @@ -614,10 +614,10 @@ ; RV64: # %bb.0: ; RV64-NEXT: xor a0, a0, a1 ; RV64-NEXT: snez a0, a0 -; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; RV64-NEXT: vmv.v.x v16, a0 ; RV64-NEXT: vmsne.vi v0, v16, 0 -; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; RV64-NEXT: vmerge.vvm v8, v12, v8, v0 ; RV64-NEXT: ret %cmp = icmp ne i64 %a, %b @@ -628,10 +628,10 @@ define <16 x i64> @select_v16i64(i1 zeroext %c, <16 x i64> %a, <16 x i64> %b) { ; CHECK-LABEL: select_v16i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v24, a0 ; CHECK-NEXT: vmsne.vi v0, v24, 0 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, <16 x i64> %a, <16 x i64> %b @@ -645,10 +645,10 @@ ; RV32-NEXT: xor a0, a0, a2 ; RV32-NEXT: or a0, a0, a1 ; RV32-NEXT: snez a0, a0 -; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV32-NEXT: vmv.v.x v24, a0 ; RV32-NEXT: vmsne.vi v0, v24, 0 -; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; RV32-NEXT: vmerge.vvm v8, v16, v8, v0 ; RV32-NEXT: ret ; @@ -656,10 +656,10 @@ ; RV64: # %bb.0: ; RV64-NEXT: xor a0, a0, a1 ; RV64-NEXT: snez a0, a0 -; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; RV64-NEXT: vmv.v.x v24, a0 ; RV64-NEXT: vmsne.vi v0, v24, 0 -; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; 
RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; RV64-NEXT: vmerge.vvm v8, v16, v8, v0 ; RV64-NEXT: ret %cmp = icmp ne i64 %a, %b diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll @@ -243,7 +243,7 @@ define <8 x i1> @fcmp_ord_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_ord_vf_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmfeq.vf v9, v9, fa0, v0.t @@ -259,7 +259,7 @@ define <8 x i1> @fcmp_ord_vf_swap_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_ord_vf_swap_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmfeq.vf v9, v9, fa0, v0.t @@ -509,7 +509,7 @@ define <8 x i1> @fcmp_uno_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_uno_vf_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmfne.vf v9, v9, fa0, v0.t @@ -525,7 +525,7 @@ define <8 x i1> @fcmp_uno_vf_swap_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_uno_vf_swap_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmfne.vf v9, v9, fa0, v0.t @@ -554,16 +554,16 @@ ; CHECK-NEXT: li a3, 0 ; CHECK-NEXT: li a1, 64 ; CHECK-NEXT: addi a4, a0, 128 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v24, (a4) -; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma ; CHECK-NEXT: addi a4, a2, -64 ; CHECK-NEXT: vslidedown.vi v0, v0, 8 ; CHECK-NEXT: bltu a2, a4, .LBB43_2 ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a3, a4 ; CHECK-NEXT: .LBB43_2: -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vsetvli zero, a3, e16, m8, ta, ma ; CHECK-NEXT: vmfeq.vv v2, v16, v24, v0.t @@ -576,7 +576,7 @@ ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmfeq.vv v16, v24, v8, v0.t -; CHECK-NEXT: vsetivli zero, 16, e8, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, tu, ma ; CHECK-NEXT: vslideup.vi v16, v2, 8 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: csrr a0, vlenb @@ -843,7 +843,7 @@ define <8 x i1> @fcmp_ord_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_ord_vf_v8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmfeq.vf v16, v12, fa0, v0.t @@ -859,7 +859,7 @@ define <8 x i1> @fcmp_ord_vf_swap_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: 
fcmp_ord_vf_swap_v8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmfeq.vf v16, v12, fa0, v0.t @@ -1112,7 +1112,7 @@ define <8 x i1> @fcmp_uno_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_uno_vf_v8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmfne.vf v16, v12, fa0, v0.t @@ -1128,7 +1128,7 @@ define <8 x i1> @fcmp_uno_vf_swap_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_uno_vf_swap_v8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmfne.vf v16, v12, fa0, v0.t @@ -1153,10 +1153,10 @@ ; CHECK-NEXT: mul a1, a1, a3 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: vmv1r.v v2, v0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v0, v0, 2 ; CHECK-NEXT: addi a1, a0, 128 -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v24, (a1) ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 @@ -1199,7 +1199,7 @@ ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmfeq.vv v16, v24, v8, v0.t -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, tu, ma ; CHECK-NEXT: vslideup.vi v16, v1, 2 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: csrr a0, vlenb diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp-mask.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp-mask.ll @@ -10,7 +10,7 @@ define <2 x i1> @icmp_eq_vv_v2i1(<2 x i1> %va, <2 x i1> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <2 x i1> @llvm.vp.icmp.v2i1(<2 x i1> %va, <2 x i1> %vb, metadata !"eq", <2 x i1> %m, i32 %evl) @@ -22,7 +22,7 @@ define <4 x i1> @icmp_eq_vv_v4i1(<4 x i1> %va, <4 x i1> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <4 x i1> @llvm.vp.icmp.v4i1(<4 x i1> %va, <4 x i1> %vb, metadata !"eq", <4 x i1> %m, i32 %evl) @@ -34,7 +34,7 @@ define <8 x i1> @icmp_eq_vv_v8i1(<8 x i1> %va, <8 x i1> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <8 x i1> @llvm.vp.icmp.v8i1(<8 x i1> %va, <8 x i1> %vb, metadata !"eq", <8 x i1> %m, i32 %evl) @@ -46,7 +46,7 @@ define <16 x i1> @icmp_eq_vv_v16i1(<16 x i1> %va, <16 x i1> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: 
icmp_eq_vv_v16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <16 x i1> @llvm.vp.icmp.v16i1(<16 x i1> %va, <16 x i1> %vb, metadata !"eq", <16 x i1> %m, i32 %evl) @@ -56,7 +56,7 @@ define <2 x i1> @icmp_ne_vv_v2i1(<2 x i1> %va, <2 x i1> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ne_vv_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <2 x i1> @llvm.vp.icmp.v2i1(<2 x i1> %va, <2 x i1> %vb, metadata !"ne", <2 x i1> %m, i32 %evl) @@ -66,7 +66,7 @@ define <4 x i1> @icmp_ne_vv_v4i1(<4 x i1> %va, <4 x i1> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ne_vv_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <4 x i1> @llvm.vp.icmp.v4i1(<4 x i1> %va, <4 x i1> %vb, metadata !"ne", <4 x i1> %m, i32 %evl) @@ -76,7 +76,7 @@ define <8 x i1> @icmp_ne_vv_v8i1(<8 x i1> %va, <8 x i1> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ne_vv_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <8 x i1> @llvm.vp.icmp.v8i1(<8 x i1> %va, <8 x i1> %vb, metadata !"ne", <8 x i1> %m, i32 %evl) @@ -86,7 +86,7 @@ define <16 x i1> @icmp_ne_vv_v16i1(<16 x i1> %va, <16 x i1> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ne_vv_v16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <16 x i1> @llvm.vp.icmp.v16i1(<16 x i1> %va, <16 x i1> %vb, metadata !"ne", <16 x i1> %m, i32 %evl) @@ -96,7 +96,7 @@ define <2 x i1> @icmp_slt_vv_v2i1(<2 x i1> %va, <2 x i1> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_slt_vv_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <2 x i1> @llvm.vp.icmp.v2i1(<2 x i1> %va, <2 x i1> %vb, metadata !"slt", <2 x i1> %m, i32 %evl) @@ -106,7 +106,7 @@ define <4 x i1> @icmp_slt_vv_v4i1(<4 x i1> %va, <4 x i1> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_slt_vv_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <4 x i1> @llvm.vp.icmp.v4i1(<4 x i1> %va, <4 x i1> %vb, metadata !"slt", <4 x i1> %m, i32 %evl) @@ -116,7 +116,7 @@ define <8 x i1> @icmp_slt_vv_v8i1(<8 x i1> %va, <8 x i1> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_slt_vv_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <8 x i1> @llvm.vp.icmp.v8i1(<8 x i1> %va, <8 x i1> %vb, metadata !"slt", <8 x i1> %m, i32 %evl) @@ -126,7 +126,7 @@ define <16 x i1> @icmp_slt_vv_v16i1(<16 x i1> %va, <16 x i1> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_slt_vv_v16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; 
CHECK-NEXT: ret %v = call <16 x i1> @llvm.vp.icmp.v16i1(<16 x i1> %va, <16 x i1> %vb, metadata !"slt", <16 x i1> %m, i32 %evl) @@ -136,7 +136,7 @@ define <2 x i1> @icmp_ult_vv_v2i1(<2 x i1> %va, <2 x i1> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ult_vv_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmandn.mm v0, v8, v0 ; CHECK-NEXT: ret %v = call <2 x i1> @llvm.vp.icmp.v2i1(<2 x i1> %va, <2 x i1> %vb, metadata !"ult", <2 x i1> %m, i32 %evl) @@ -146,7 +146,7 @@ define <4 x i1> @icmp_ult_vv_v4i1(<4 x i1> %va, <4 x i1> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ult_vv_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmandn.mm v0, v8, v0 ; CHECK-NEXT: ret %v = call <4 x i1> @llvm.vp.icmp.v4i1(<4 x i1> %va, <4 x i1> %vb, metadata !"ult", <4 x i1> %m, i32 %evl) @@ -156,7 +156,7 @@ define <8 x i1> @icmp_ult_vv_v8i1(<8 x i1> %va, <8 x i1> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ult_vv_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmandn.mm v0, v8, v0 ; CHECK-NEXT: ret %v = call <8 x i1> @llvm.vp.icmp.v8i1(<8 x i1> %va, <8 x i1> %vb, metadata !"ult", <8 x i1> %m, i32 %evl) @@ -166,7 +166,7 @@ define <16 x i1> @icmp_ult_vv_v16i1(<16 x i1> %va, <16 x i1> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ult_vv_v16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmandn.mm v0, v8, v0 ; CHECK-NEXT: ret %v = call <16 x i1> @llvm.vp.icmp.v16i1(<16 x i1> %va, <16 x i1> %vb, metadata !"ult", <16 x i1> %m, i32 %evl) @@ -176,7 +176,7 @@ define <2 x i1> @icmp_sgt_vv_v2i1(<2 x i1> %va, <2 x i1> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sgt_vv_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmandn.mm v0, v8, v0 ; CHECK-NEXT: ret %v = call <2 x i1> @llvm.vp.icmp.v2i1(<2 x i1> %va, <2 x i1> %vb, metadata !"sgt", <2 x i1> %m, i32 %evl) @@ -186,7 +186,7 @@ define <4 x i1> @icmp_sgt_vv_v4i1(<4 x i1> %va, <4 x i1> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sgt_vv_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmandn.mm v0, v8, v0 ; CHECK-NEXT: ret %v = call <4 x i1> @llvm.vp.icmp.v4i1(<4 x i1> %va, <4 x i1> %vb, metadata !"sgt", <4 x i1> %m, i32 %evl) @@ -196,7 +196,7 @@ define <8 x i1> @icmp_sgt_vv_v8i1(<8 x i1> %va, <8 x i1> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sgt_vv_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmandn.mm v0, v8, v0 ; CHECK-NEXT: ret %v = call <8 x i1> @llvm.vp.icmp.v8i1(<8 x i1> %va, <8 x i1> %vb, metadata !"sgt", <8 x i1> %m, i32 %evl) @@ -206,7 +206,7 @@ define <16 x i1> @icmp_sgt_vv_v16i1(<16 x i1> %va, <16 x i1> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sgt_vv_v16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmandn.mm v0, v8, v0 ; CHECK-NEXT: ret %v = call <16 x i1> @llvm.vp.icmp.v16i1(<16 x i1> %va, <16 x i1> %vb, metadata !"sgt", <16 x i1> %m, i32 %evl) @@ -216,7 +216,7 @@ 
define <2 x i1> @icmp_ugt_vv_v2i1(<2 x i1> %va, <2 x i1> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ugt_vv_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <2 x i1> @llvm.vp.icmp.v2i1(<2 x i1> %va, <2 x i1> %vb, metadata !"ugt", <2 x i1> %m, i32 %evl) @@ -226,7 +226,7 @@ define <4 x i1> @icmp_ugt_vv_v4i1(<4 x i1> %va, <4 x i1> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ugt_vv_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <4 x i1> @llvm.vp.icmp.v4i1(<4 x i1> %va, <4 x i1> %vb, metadata !"ugt", <4 x i1> %m, i32 %evl) @@ -236,7 +236,7 @@ define <8 x i1> @icmp_ugt_vv_v8i1(<8 x i1> %va, <8 x i1> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ugt_vv_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <8 x i1> @llvm.vp.icmp.v8i1(<8 x i1> %va, <8 x i1> %vb, metadata !"ugt", <8 x i1> %m, i32 %evl) @@ -246,7 +246,7 @@ define <16 x i1> @icmp_ugt_vv_v16i1(<16 x i1> %va, <16 x i1> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ugt_vv_v16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <16 x i1> @llvm.vp.icmp.v16i1(<16 x i1> %va, <16 x i1> %vb, metadata !"ugt", <16 x i1> %m, i32 %evl) @@ -256,7 +256,7 @@ define <2 x i1> @icmp_sle_vv_v2i1(<2 x i1> %va, <2 x i1> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sle_vv_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v8, v0 ; CHECK-NEXT: ret %v = call <2 x i1> @llvm.vp.icmp.v2i1(<2 x i1> %va, <2 x i1> %vb, metadata !"sle", <2 x i1> %m, i32 %evl) @@ -266,7 +266,7 @@ define <4 x i1> @icmp_sle_vv_v4i1(<4 x i1> %va, <4 x i1> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sle_vv_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v8, v0 ; CHECK-NEXT: ret %v = call <4 x i1> @llvm.vp.icmp.v4i1(<4 x i1> %va, <4 x i1> %vb, metadata !"sle", <4 x i1> %m, i32 %evl) @@ -276,7 +276,7 @@ define <8 x i1> @icmp_sle_vv_v8i1(<8 x i1> %va, <8 x i1> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sle_vv_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v8, v0 ; CHECK-NEXT: ret %v = call <8 x i1> @llvm.vp.icmp.v8i1(<8 x i1> %va, <8 x i1> %vb, metadata !"sle", <8 x i1> %m, i32 %evl) @@ -286,7 +286,7 @@ define <16 x i1> @icmp_sle_vv_v16i1(<16 x i1> %va, <16 x i1> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sle_vv_v16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v8, v0 ; CHECK-NEXT: ret %v = call <16 x i1> @llvm.vp.icmp.v16i1(<16 x i1> %va, <16 x i1> %vb, metadata !"sle", <16 x i1> %m, i32 %evl) @@ -296,7 +296,7 @@ define <2 x i1> @icmp_ule_vv_v2i1(<2 x i1> %va, <2 x i1> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ule_vv_v2i1: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <2 x i1> @llvm.vp.icmp.v2i1(<2 x i1> %va, <2 x i1> %vb, metadata !"ule", <2 x i1> %m, i32 %evl) @@ -306,7 +306,7 @@ define <4 x i1> @icmp_ule_vv_v4i1(<4 x i1> %va, <4 x i1> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ule_vv_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <4 x i1> @llvm.vp.icmp.v4i1(<4 x i1> %va, <4 x i1> %vb, metadata !"ule", <4 x i1> %m, i32 %evl) @@ -316,7 +316,7 @@ define <8 x i1> @icmp_ule_vv_v8i1(<8 x i1> %va, <8 x i1> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ule_vv_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <8 x i1> @llvm.vp.icmp.v8i1(<8 x i1> %va, <8 x i1> %vb, metadata !"ule", <8 x i1> %m, i32 %evl) @@ -326,7 +326,7 @@ define <16 x i1> @icmp_ule_vv_v16i1(<16 x i1> %va, <16 x i1> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ule_vv_v16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <16 x i1> @llvm.vp.icmp.v16i1(<16 x i1> %va, <16 x i1> %vb, metadata !"ule", <16 x i1> %m, i32 %evl) @@ -336,7 +336,7 @@ define <2 x i1> @icmp_sge_vv_v2i1(<2 x i1> %va, <2 x i1> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sge_vv_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <2 x i1> @llvm.vp.icmp.v2i1(<2 x i1> %va, <2 x i1> %vb, metadata !"sge", <2 x i1> %m, i32 %evl) @@ -346,7 +346,7 @@ define <4 x i1> @icmp_sge_vv_v4i1(<4 x i1> %va, <4 x i1> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sge_vv_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <4 x i1> @llvm.vp.icmp.v4i1(<4 x i1> %va, <4 x i1> %vb, metadata !"sge", <4 x i1> %m, i32 %evl) @@ -356,7 +356,7 @@ define <8 x i1> @icmp_sge_vv_v8i1(<8 x i1> %va, <8 x i1> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sge_vv_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <8 x i1> @llvm.vp.icmp.v8i1(<8 x i1> %va, <8 x i1> %vb, metadata !"sge", <8 x i1> %m, i32 %evl) @@ -366,7 +366,7 @@ define <16 x i1> @icmp_sge_vv_v16i1(<16 x i1> %va, <16 x i1> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sge_vv_v16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <16 x i1> @llvm.vp.icmp.v16i1(<16 x i1> %va, <16 x i1> %vb, metadata !"sge", <16 x i1> %m, i32 %evl) @@ -376,7 +376,7 @@ define <2 x i1> @icmp_uge_vv_v2i1(<2 x i1> %va, <2 x i1> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_uge_vv_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v8, v0 ; CHECK-NEXT: ret %v = 
call <2 x i1> @llvm.vp.icmp.v2i1(<2 x i1> %va, <2 x i1> %vb, metadata !"uge", <2 x i1> %m, i32 %evl) @@ -386,7 +386,7 @@ define <4 x i1> @icmp_uge_vv_v4i1(<4 x i1> %va, <4 x i1> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_uge_vv_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v8, v0 ; CHECK-NEXT: ret %v = call <4 x i1> @llvm.vp.icmp.v4i1(<4 x i1> %va, <4 x i1> %vb, metadata !"uge", <4 x i1> %m, i32 %evl) @@ -396,7 +396,7 @@ define <8 x i1> @icmp_uge_vv_v8i1(<8 x i1> %va, <8 x i1> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_uge_vv_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v8, v0 ; CHECK-NEXT: ret %v = call <8 x i1> @llvm.vp.icmp.v8i1(<8 x i1> %va, <8 x i1> %vb, metadata !"uge", <8 x i1> %m, i32 %evl) @@ -406,7 +406,7 @@ define <16 x i1> @icmp_uge_vv_v16i1(<16 x i1> %va, <16 x i1> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_uge_vv_v16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v8, v0 ; CHECK-NEXT: ret %v = call <16 x i1> @llvm.vp.icmp.v16i1(<16 x i1> %va, <16 x i1> %vb, metadata !"uge", <16 x i1> %m, i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll @@ -13,7 +13,7 @@ ; CHECK-LABEL: icmp_eq_vv_v8i7: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 127 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vand.vx v9, v9, a1 ; CHECK-NEXT: vand.vx v8, v8, a1 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma @@ -27,7 +27,7 @@ ; CHECK-LABEL: icmp_eq_vx_v8i7: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 127 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a2 ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vand.vx v9, v9, a2 @@ -44,7 +44,7 @@ ; CHECK-LABEL: icmp_eq_vx_swap_v8i7: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 127 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a2 ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vand.vx v9, v9, a2 @@ -282,7 +282,7 @@ define <8 x i1> @icmp_uge_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_uge_vx_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v9, v8, v0.t @@ -458,7 +458,7 @@ define <8 x i1> @icmp_sge_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sge_vx_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmsle.vv v0, v9, v8, v0.t @@ -588,7 +588,7 @@ define <8 x i1> @icmp_sle_vx_swap_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sle_vx_swap_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: 
vmv.v.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmsle.vv v0, v9, v8, v0.t @@ -635,7 +635,7 @@ ; CHECK-NEXT: mul a1, a1, a4 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: li a4, 128 -; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v24, (a0) ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 @@ -695,7 +695,7 @@ ; CHECK-LABEL: icmp_eq_vx_v256i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a3, 128 -; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma ; CHECK-NEXT: vlm.v v25, (a1) ; CHECK-NEXT: addi a4, a2, -128 ; CHECK-NEXT: vmv1r.v v24, v0 @@ -727,7 +727,7 @@ ; CHECK-LABEL: icmp_eq_vx_swap_v256i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a3, 128 -; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma ; CHECK-NEXT: vlm.v v25, (a1) ; CHECK-NEXT: addi a4, a2, -128 ; CHECK-NEXT: vmv1r.v v24, v0 @@ -960,7 +960,7 @@ define <8 x i1> @icmp_uge_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_uge_vx_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmsleu.vv v10, v12, v8, v0.t @@ -1151,7 +1151,7 @@ define <8 x i1> @icmp_sge_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sge_vx_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmsle.vv v10, v12, v8, v0.t @@ -1292,7 +1292,7 @@ define <8 x i1> @icmp_sle_vx_swap_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sle_vx_swap_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmsle.vv v10, v12, v8, v0.t @@ -1346,16 +1346,16 @@ ; CHECK-NEXT: li a3, 0 ; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: addi a4, a0, 128 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v24, (a4) -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; CHECK-NEXT: addi a4, a2, -32 ; CHECK-NEXT: vslidedown.vi v0, v0, 4 ; CHECK-NEXT: bltu a2, a4, .LBB99_2 ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a3, a4 ; CHECK-NEXT: .LBB99_2: -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; CHECK-NEXT: vmseq.vv v2, v16, v24, v0.t @@ -1368,7 +1368,7 @@ ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmseq.vv v16, v24, v8, v0.t -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v16, v2, 4 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: csrr a0, vlenb @@ -1385,7 +1385,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: li a2, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; CHECK-NEXT: addi a3, a1, -32 ; CHECK-NEXT: vslidedown.vi v0, v0, 4 ; CHECK-NEXT: bltu a1, a3, .LBB100_2 @@ -1402,7 +1402,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, 
m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vmseq.vx v16, v8, a0, v0.t -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v16, v25, 4 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ -1417,7 +1417,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: li a2, 0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; CHECK-NEXT: addi a3, a1, -32 ; CHECK-NEXT: vslidedown.vi v0, v0, 4 ; CHECK-NEXT: bltu a1, a3, .LBB101_2 @@ -1434,7 +1434,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vmseq.vx v16, v8, a0, v0.t -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v16, v25, 4 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ -1465,7 +1465,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmseq.vv v12, v8, v16, v0.t @@ -1493,7 +1493,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmseq.vv v12, v16, v8, v0.t @@ -1558,7 +1558,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmsne.vv v12, v8, v16, v0.t @@ -1586,7 +1586,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmsne.vv v12, v16, v8, v0.t @@ -1651,7 +1651,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmsltu.vv v12, v16, v8, v0.t @@ -1679,7 +1679,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmsltu.vv v12, v8, v16, v0.t @@ -1744,7 +1744,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmsleu.vv v12, v16, v8, v0.t @@ -1754,7 +1754,7 @@ ; ; RV64-LABEL: icmp_uge_vx_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vmv.v.x v16, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vmsleu.vv v12, v16, v8, v0.t @@ -1774,7 +1774,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 
8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmsleu.vv v12, v8, v16, v0.t @@ -1839,7 +1839,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmsltu.vv v12, v8, v16, v0.t @@ -1867,7 +1867,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmsltu.vv v12, v16, v8, v0.t @@ -1932,7 +1932,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmslt.vv v12, v16, v8, v0.t @@ -1960,7 +1960,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmslt.vv v12, v8, v16, v0.t @@ -2025,7 +2025,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmsle.vv v12, v16, v8, v0.t @@ -2035,7 +2035,7 @@ ; ; RV64-LABEL: icmp_sge_vx_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vmv.v.x v16, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vmsle.vv v12, v16, v8, v0.t @@ -2055,7 +2055,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmsle.vv v12, v8, v16, v0.t @@ -2120,7 +2120,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmslt.vv v12, v8, v16, v0.t @@ -2148,7 +2148,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmslt.vv v12, v16, v8, v0.t @@ -2213,7 +2213,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmsle.vv v12, v8, v16, v0.t @@ -2241,7 +2241,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; 
RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmsle.vv v12, v16, v8, v0.t @@ -2251,7 +2251,7 @@ ; ; RV64-LABEL: icmp_sle_vx_swap_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vmv.v.x v16, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vmsle.vv v12, v16, v8, v0.t diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp-mask.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp-mask.ll @@ -9,7 +9,7 @@ define <4 x i16> @vsext_v4i16_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v4i16_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -20,7 +20,7 @@ define <4 x i16> @vsext_v4i16_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v4i16_v4i1_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -33,7 +33,7 @@ define <4 x i32> @vsext_v4i32_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v4i32_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -44,7 +44,7 @@ define <4 x i32> @vsext_v4i32_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v4i32_v4i1_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -57,7 +57,7 @@ define <4 x i64> @vsext_v4i64_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v4i64_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -68,7 +68,7 @@ define <4 x i64> @vsext_v4i64_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v4i64_v4i1_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp.ll @@ -20,7 +20,7 @@ define <4 x i16> @vsext_v4i16_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v4i16_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsext.vf2 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -44,7 +44,7 @@ define <4 x i32> @vsext_v4i32_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v4i32_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, 
a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsext.vf4 v9, v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -68,7 +68,7 @@ define <4 x i64> @vsext_v4i64_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v4i64_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsext.vf8 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -92,7 +92,7 @@ define <4 x i32> @vsext_v4i32_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v4i32_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsext.vf2 v9, v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -116,7 +116,7 @@ define <4 x i64> @vsext_v4i64_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v4i64_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsext.vf4 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -140,7 +140,7 @@ define <4 x i64> @vsext_v4i64_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsext_v4i64_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsext.vf2 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -155,14 +155,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v1, v0 ; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: addi a2, a0, -16 ; CHECK-NEXT: vslidedown.vi v0, v0, 2 ; CHECK-NEXT: bltu a0, a2, .LBB12_2 ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a1, a2 ; CHECK-NEXT: .LBB12_2: -; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v24, v8, 16 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: li a1, 16 @@ -189,16 +189,16 @@ ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a1, a2 ; CHECK-NEXT: .LBB13_2: -; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v24, v8, 16 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: vsext.vf2 v16, v24 ; CHECK-NEXT: bltu a0, a1, .LBB13_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: .LBB13_4: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsext.vf2 v24, v8 ; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shufflevector-vnsrl.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shufflevector-vnsrl.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shufflevector-vnsrl.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shufflevector-vnsrl.ll @@ -7,9 +7,9 @@ define void @vnsrl_0_i8(ptr %in, ptr %out) { ; CHECK-LABEL: vnsrl_0_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 16, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 8, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vse8.v v8, (a1) ; CHECK-NEXT: ret @@ -23,9 +23,9 @@ define void @vnsrl_8_i8(ptr %in, ptr %out) { ; CHECK-LABEL: vnsrl_8_i8: ; CHECK: 
# %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 16, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 8, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 8 ; CHECK-NEXT: vse8.v v8, (a1) ; CHECK-NEXT: ret @@ -39,18 +39,18 @@ define void @vnsrl_0_i16(ptr %in, ptr %out) { ; V-LABEL: vnsrl_0_i16: ; V: # %bb.0: # %entry -; V-NEXT: vsetivli zero, 8, e16, mf2, ta, mu +; V-NEXT: vsetivli zero, 8, e16, mf2, ta, ma ; V-NEXT: vle16.v v8, (a0) -; V-NEXT: vsetivli zero, 4, e16, mf4, ta, mu +; V-NEXT: vsetivli zero, 4, e16, mf4, ta, ma ; V-NEXT: vnsrl.wi v8, v8, 0 ; V-NEXT: vse16.v v8, (a1) ; V-NEXT: ret ; ; ZVE32F-LABEL: vnsrl_0_i16: ; ZVE32F: # %bb.0: # %entry -; ZVE32F-NEXT: vsetivli zero, 8, e16, mf2, ta, mu +; ZVE32F-NEXT: vsetivli zero, 8, e16, mf2, ta, ma ; ZVE32F-NEXT: vle16.v v8, (a0) -; ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; ZVE32F-NEXT: vnsrl.wi v8, v8, 0 ; ZVE32F-NEXT: vse16.v v8, (a1) ; ZVE32F-NEXT: ret @@ -64,18 +64,18 @@ define void @vnsrl_16_i16(ptr %in, ptr %out) { ; V-LABEL: vnsrl_16_i16: ; V: # %bb.0: # %entry -; V-NEXT: vsetivli zero, 8, e16, mf2, ta, mu +; V-NEXT: vsetivli zero, 8, e16, mf2, ta, ma ; V-NEXT: vle16.v v8, (a0) -; V-NEXT: vsetivli zero, 4, e16, mf4, ta, mu +; V-NEXT: vsetivli zero, 4, e16, mf4, ta, ma ; V-NEXT: vnsrl.wi v8, v8, 16 ; V-NEXT: vse16.v v8, (a1) ; V-NEXT: ret ; ; ZVE32F-LABEL: vnsrl_16_i16: ; ZVE32F: # %bb.0: # %entry -; ZVE32F-NEXT: vsetivli zero, 8, e16, mf2, ta, mu +; ZVE32F-NEXT: vsetivli zero, 8, e16, mf2, ta, ma ; ZVE32F-NEXT: vle16.v v8, (a0) -; ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; ZVE32F-NEXT: vnsrl.wi v8, v8, 16 ; ZVE32F-NEXT: vse16.v v8, (a1) ; ZVE32F-NEXT: ret @@ -89,18 +89,18 @@ define void @vnsrl_0_half(ptr %in, ptr %out) { ; V-LABEL: vnsrl_0_half: ; V: # %bb.0: # %entry -; V-NEXT: vsetivli zero, 8, e16, mf2, ta, mu +; V-NEXT: vsetivli zero, 8, e16, mf2, ta, ma ; V-NEXT: vle16.v v8, (a0) -; V-NEXT: vsetivli zero, 4, e16, mf4, ta, mu +; V-NEXT: vsetivli zero, 4, e16, mf4, ta, ma ; V-NEXT: vnsrl.wi v8, v8, 0 ; V-NEXT: vse16.v v8, (a1) ; V-NEXT: ret ; ; ZVE32F-LABEL: vnsrl_0_half: ; ZVE32F: # %bb.0: # %entry -; ZVE32F-NEXT: vsetivli zero, 8, e16, mf2, ta, mu +; ZVE32F-NEXT: vsetivli zero, 8, e16, mf2, ta, ma ; ZVE32F-NEXT: vle16.v v8, (a0) -; ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; ZVE32F-NEXT: vnsrl.wi v8, v8, 0 ; ZVE32F-NEXT: vse16.v v8, (a1) ; ZVE32F-NEXT: ret @@ -114,18 +114,18 @@ define void @vnsrl_16_half(ptr %in, ptr %out) { ; V-LABEL: vnsrl_16_half: ; V: # %bb.0: # %entry -; V-NEXT: vsetivli zero, 8, e16, mf2, ta, mu +; V-NEXT: vsetivli zero, 8, e16, mf2, ta, ma ; V-NEXT: vle16.v v8, (a0) -; V-NEXT: vsetivli zero, 4, e16, mf4, ta, mu +; V-NEXT: vsetivli zero, 4, e16, mf4, ta, ma ; V-NEXT: vnsrl.wi v8, v8, 16 ; V-NEXT: vse16.v v8, (a1) ; V-NEXT: ret ; ; ZVE32F-LABEL: vnsrl_16_half: ; ZVE32F: # %bb.0: # %entry -; ZVE32F-NEXT: vsetivli zero, 8, e16, mf2, ta, mu +; ZVE32F-NEXT: vsetivli zero, 8, e16, mf2, ta, ma ; ZVE32F-NEXT: vle16.v v8, (a0) -; ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; ZVE32F-NEXT: vnsrl.wi v8, v8, 16 ; ZVE32F-NEXT: vse16.v v8, (a1) ; ZVE32F-NEXT: ret @@ -139,16 +139,16 @@ define void @vnsrl_0_i32(ptr %in, ptr %out) { ; V-LABEL: vnsrl_0_i32: ; V: # %bb.0: # 
%entry -; V-NEXT: vsetivli zero, 4, e32, mf2, ta, mu +; V-NEXT: vsetivli zero, 4, e32, mf2, ta, ma ; V-NEXT: vle32.v v8, (a0) -; V-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; V-NEXT: vnsrl.wi v8, v8, 0 ; V-NEXT: vse32.v v8, (a1) ; V-NEXT: ret ; ; ZVE32F-LABEL: vnsrl_0_i32: ; ZVE32F: # %bb.0: # %entry -; ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; ZVE32F-NEXT: vle32.v v8, (a0) ; ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu ; ZVE32F-NEXT: vslidedown.vi v9, v8, 2 @@ -168,17 +168,17 @@ define void @vnsrl_32_i32(ptr %in, ptr %out) { ; V-LABEL: vnsrl_32_i32: ; V: # %bb.0: # %entry -; V-NEXT: vsetivli zero, 4, e32, mf2, ta, mu +; V-NEXT: vsetivli zero, 4, e32, mf2, ta, ma ; V-NEXT: vle32.v v8, (a0) ; V-NEXT: li a0, 32 -; V-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; V-NEXT: vnsrl.wx v8, v8, a0 ; V-NEXT: vse32.v v8, (a1) ; V-NEXT: ret ; ; ZVE32F-LABEL: vnsrl_32_i32: ; ZVE32F: # %bb.0: # %entry -; ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; ZVE32F-NEXT: vle32.v v8, (a0) ; ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu ; ZVE32F-NEXT: vslidedown.vi v9, v8, 2 @@ -198,16 +198,16 @@ define void @vnsrl_0_float(ptr %in, ptr %out) { ; V-LABEL: vnsrl_0_float: ; V: # %bb.0: # %entry -; V-NEXT: vsetivli zero, 4, e32, mf2, ta, mu +; V-NEXT: vsetivli zero, 4, e32, mf2, ta, ma ; V-NEXT: vle32.v v8, (a0) -; V-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; V-NEXT: vnsrl.wi v8, v8, 0 ; V-NEXT: vse32.v v8, (a1) ; V-NEXT: ret ; ; ZVE32F-LABEL: vnsrl_0_float: ; ZVE32F: # %bb.0: # %entry -; ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; ZVE32F-NEXT: vle32.v v8, (a0) ; ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu ; ZVE32F-NEXT: vslidedown.vi v9, v8, 2 @@ -227,17 +227,17 @@ define void @vnsrl_32_float(ptr %in, ptr %out) { ; V-LABEL: vnsrl_32_float: ; V: # %bb.0: # %entry -; V-NEXT: vsetivli zero, 4, e32, mf2, ta, mu +; V-NEXT: vsetivli zero, 4, e32, mf2, ta, ma ; V-NEXT: vle32.v v8, (a0) ; V-NEXT: li a0, 32 -; V-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; V-NEXT: vnsrl.wx v8, v8, a0 ; V-NEXT: vse32.v v8, (a1) ; V-NEXT: ret ; ; ZVE32F-LABEL: vnsrl_32_float: ; ZVE32F: # %bb.0: # %entry -; ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; ZVE32F-NEXT: vle32.v v8, (a0) ; ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, mu ; ZVE32F-NEXT: vslidedown.vi v9, v8, 2 @@ -257,7 +257,7 @@ define void @vnsrl_0_i64(ptr %in, ptr %out) { ; V-LABEL: vnsrl_0_i64: ; V: # %bb.0: # %entry -; V-NEXT: vsetivli zero, 4, e64, m1, ta, mu +; V-NEXT: vsetivli zero, 4, e64, m1, ta, ma ; V-NEXT: vle64.v v8, (a0) ; V-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; V-NEXT: vslidedown.vi v9, v8, 2 @@ -285,7 +285,7 @@ define void @vnsrl_64_i64(ptr %in, ptr %out) { ; V-LABEL: vnsrl_64_i64: ; V: # %bb.0: # %entry -; V-NEXT: vsetivli zero, 4, e64, m1, ta, mu +; V-NEXT: vsetivli zero, 4, e64, m1, ta, ma ; V-NEXT: vle64.v v8, (a0) ; V-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; V-NEXT: vslidedown.vi v9, v8, 2 @@ -313,7 +313,7 @@ define void @vnsrl_0_double(ptr %in, ptr %out) { ; V-LABEL: vnsrl_0_double: ; V: # %bb.0: # %entry -; V-NEXT: vsetivli zero, 4, e64, m1, ta, mu +; V-NEXT: vsetivli zero, 4, e64, m1, ta, ma ; V-NEXT: vle64.v v8, (a0) ; V-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; 
V-NEXT: vslidedown.vi v9, v8, 2 @@ -341,7 +341,7 @@ define void @vnsrl_64_double(ptr %in, ptr %out) { ; V-LABEL: vnsrl_64_double: ; V: # %bb.0: # %entry -; V-NEXT: vsetivli zero, 4, e64, m1, ta, mu +; V-NEXT: vsetivli zero, 4, e64, m1, ta, ma ; V-NEXT: vle64.v v8, (a0) ; V-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; V-NEXT: vslidedown.vi v9, v8, 2 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp-mask.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp-mask.ll @@ -22,7 +22,7 @@ define <4 x half> @vsitofp_v4f16_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f16_v4i1_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 @@ -49,7 +49,7 @@ define <4 x float> @vsitofp_v4f32_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f32_v4i1_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 @@ -76,7 +76,7 @@ define <4 x double> @vsitofp_v4f64_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f64_v4i1_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll @@ -9,7 +9,7 @@ define <4 x half> @vsitofp_v4f16_v4i7(<4 x i7> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f16_v4i7: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v8 ; CHECK-NEXT: vsra.vi v9, v8, 1 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -35,7 +35,7 @@ define <4 x half> @vsitofp_v4f16_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f16_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -58,7 +58,7 @@ define <4 x half> @vsitofp_v4f16_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f16_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret %v = call <4 x half> @llvm.vp.sitofp.v4f16.v4i16(<4 x i16> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) @@ -81,7 +81,7 @@ define <4 x half> @vsitofp_v4f16_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f16_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.x.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -106,9 +106,9 @@ define <4 x half> 
@vsitofp_v4f16_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f16_v4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.f.x.w v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v8, v10 ; CHECK-NEXT: ret %v = call <4 x half> @llvm.vp.sitofp.v4f16.v4i64(<4 x i64> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) @@ -131,7 +131,7 @@ define <4 x float> @vsitofp_v4f32_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f32_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsext.vf2 v9, v8 ; CHECK-NEXT: vfwcvt.f.x.v v8, v9 ; CHECK-NEXT: ret @@ -155,7 +155,7 @@ define <4 x float> @vsitofp_v4f32_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f32_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -178,7 +178,7 @@ define <4 x float> @vsitofp_v4f32_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f32_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret %v = call <4 x float> @llvm.vp.sitofp.v4f32.v4i32(<4 x i32> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) @@ -201,7 +201,7 @@ define <4 x float> @vsitofp_v4f32_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f32_v4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.f.x.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -225,7 +225,7 @@ define <4 x double> @vsitofp_v4f64_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f64_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsext.vf4 v10, v8 ; CHECK-NEXT: vfwcvt.f.x.v v8, v10 ; CHECK-NEXT: ret @@ -249,7 +249,7 @@ define <4 x double> @vsitofp_v4f64_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f64_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsext.vf2 v10, v8 ; CHECK-NEXT: vfwcvt.f.x.v v8, v10 ; CHECK-NEXT: ret @@ -273,7 +273,7 @@ define <4 x double> @vsitofp_v4f64_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f64_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -296,7 +296,7 @@ define <4 x double> @vsitofp_v4f64_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_v4f64_v4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret %v = call <4 
x double> @llvm.vp.sitofp.v4f64.v4i64(<4 x i64> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) @@ -310,7 +310,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: addi a2, a0, -16 ; CHECK-NEXT: vslidedown.vi v0, v0, 2 ; CHECK-NEXT: bltu a0, a2, .LBB25_2 @@ -341,14 +341,14 @@ ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a2, a1 ; CHECK-NEXT: .LBB26_2: -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: vfcvt.f.x.v v16, v16 ; CHECK-NEXT: bltu a0, a1, .LBB26_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: .LBB26_4: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret %v = call <32 x double> @llvm.vp.sitofp.v32f64.v32i64(<32 x i64> %va, <32 x i1> shufflevector (<32 x i1> insertelement (<32 x i1> undef, i1 true, i32 0), <32 x i1> undef, <32 x i32> zeroinitializer), i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector-rv32.ll @@ -7,7 +7,7 @@ define <2 x i8> @stepvector_v2i8() { ; CHECK-LABEL: stepvector_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.experimental.stepvector.v2i8() @@ -19,7 +19,7 @@ define <3 x i8> @stepvector_v3i8() { ; CHECK-LABEL: stepvector_v3i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call <3 x i8> @llvm.experimental.stepvector.v3i8() @@ -31,7 +31,7 @@ define <4 x i8> @stepvector_v4i8() { ; CHECK-LABEL: stepvector_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.experimental.stepvector.v4i8() @@ -43,7 +43,7 @@ define <8 x i8> @stepvector_v8i8() { ; CHECK-LABEL: stepvector_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call <8 x i8> @llvm.experimental.stepvector.v8i8() @@ -55,7 +55,7 @@ define <16 x i8> @stepvector_v16i8() { ; CHECK-LABEL: stepvector_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call <16 x i8> @llvm.experimental.stepvector.v16i8() @@ -67,7 +67,7 @@ define <2 x i16> @stepvector_v2i16() { ; CHECK-LABEL: stepvector_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.experimental.stepvector.v2i16() @@ -79,7 +79,7 @@ define <4 x i16> @stepvector_v4i16() { ; CHECK-LABEL: stepvector_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call <4 x i16> 
@llvm.experimental.stepvector.v4i16() @@ -91,7 +91,7 @@ define <8 x i16> @stepvector_v8i16() { ; CHECK-LABEL: stepvector_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call <8 x i16> @llvm.experimental.stepvector.v8i16() @@ -103,14 +103,14 @@ define <16 x i16> @stepvector_v16i16() { ; LMULMAX1-LABEL: stepvector_v16i16: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-NEXT: vid.v v8 ; LMULMAX1-NEXT: vadd.vi v9, v8, 8 ; LMULMAX1-NEXT: ret ; ; LMULMAX2-LABEL: stepvector_v16i16: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-NEXT: vid.v v8 ; LMULMAX2-NEXT: ret %v = call <16 x i16> @llvm.experimental.stepvector.v16i16() @@ -122,7 +122,7 @@ define <2 x i32> @stepvector_v2i32() { ; CHECK-LABEL: stepvector_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call <2 x i32> @llvm.experimental.stepvector.v2i32() @@ -134,7 +134,7 @@ define <4 x i32> @stepvector_v4i32() { ; CHECK-LABEL: stepvector_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call <4 x i32> @llvm.experimental.stepvector.v4i32() @@ -146,14 +146,14 @@ define <8 x i32> @stepvector_v8i32() { ; LMULMAX1-LABEL: stepvector_v8i32: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vid.v v8 ; LMULMAX1-NEXT: vadd.vi v9, v8, 4 ; LMULMAX1-NEXT: ret ; ; LMULMAX2-LABEL: stepvector_v8i32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vid.v v8 ; LMULMAX2-NEXT: ret %v = call <8 x i32> @llvm.experimental.stepvector.v8i32() @@ -165,7 +165,7 @@ define <16 x i32> @stepvector_v16i32() { ; LMULMAX1-LABEL: stepvector_v16i32: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vid.v v8 ; LMULMAX1-NEXT: vadd.vi v9, v8, 4 ; LMULMAX1-NEXT: vadd.vi v10, v8, 8 @@ -174,7 +174,7 @@ ; ; LMULMAX2-LABEL: stepvector_v16i32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vid.v v8 ; LMULMAX2-NEXT: vadd.vi v10, v8, 8 ; LMULMAX2-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector-rv64.ll @@ -7,7 +7,7 @@ define <2 x i8> @stepvector_v2i8() { ; CHECK-LABEL: stepvector_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.experimental.stepvector.v2i8() @@ -19,7 +19,7 @@ define <3 x i8> @stepvector_v3i8() { ; CHECK-LABEL: stepvector_v3i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call <3 x i8> 
@llvm.experimental.stepvector.v3i8() @@ -31,7 +31,7 @@ define <4 x i8> @stepvector_v4i8() { ; CHECK-LABEL: stepvector_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.experimental.stepvector.v4i8() @@ -43,7 +43,7 @@ define <8 x i8> @stepvector_v8i8() { ; CHECK-LABEL: stepvector_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call <8 x i8> @llvm.experimental.stepvector.v8i8() @@ -55,7 +55,7 @@ define <16 x i8> @stepvector_v16i8() { ; CHECK-LABEL: stepvector_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call <16 x i8> @llvm.experimental.stepvector.v16i8() @@ -67,7 +67,7 @@ define <2 x i16> @stepvector_v2i16() { ; CHECK-LABEL: stepvector_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.experimental.stepvector.v2i16() @@ -79,7 +79,7 @@ define <4 x i16> @stepvector_v4i16() { ; CHECK-LABEL: stepvector_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.experimental.stepvector.v4i16() @@ -91,7 +91,7 @@ define <8 x i16> @stepvector_v8i16() { ; CHECK-LABEL: stepvector_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call <8 x i16> @llvm.experimental.stepvector.v8i16() @@ -103,14 +103,14 @@ define <16 x i16> @stepvector_v16i16() { ; LMULMAX1-LABEL: stepvector_v16i16: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; LMULMAX1-NEXT: vid.v v8 ; LMULMAX1-NEXT: vadd.vi v9, v8, 8 ; LMULMAX1-NEXT: ret ; ; LMULMAX2-LABEL: stepvector_v16i16: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; LMULMAX2-NEXT: vid.v v8 ; LMULMAX2-NEXT: ret %v = call <16 x i16> @llvm.experimental.stepvector.v16i16() @@ -122,7 +122,7 @@ define <2 x i32> @stepvector_v2i32() { ; CHECK-LABEL: stepvector_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call <2 x i32> @llvm.experimental.stepvector.v2i32() @@ -134,7 +134,7 @@ define <4 x i32> @stepvector_v4i32() { ; CHECK-LABEL: stepvector_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call <4 x i32> @llvm.experimental.stepvector.v4i32() @@ -146,14 +146,14 @@ define <8 x i32> @stepvector_v8i32() { ; LMULMAX1-LABEL: stepvector_v8i32: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vid.v v8 ; LMULMAX1-NEXT: vadd.vi v9, v8, 4 ; LMULMAX1-NEXT: ret ; ; LMULMAX2-LABEL: stepvector_v8i32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vid.v v8 ; 
LMULMAX2-NEXT: ret %v = call <8 x i32> @llvm.experimental.stepvector.v8i32() @@ -165,7 +165,7 @@ define <16 x i32> @stepvector_v16i32() { ; LMULMAX1-LABEL: stepvector_v16i32: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; LMULMAX1-NEXT: vid.v v8 ; LMULMAX1-NEXT: vadd.vi v9, v8, 4 ; LMULMAX1-NEXT: vadd.vi v10, v8, 8 @@ -174,7 +174,7 @@ ; ; LMULMAX2-LABEL: stepvector_v16i32: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; LMULMAX2-NEXT: vid.v v8 ; LMULMAX2-NEXT: vadd.vi v10, v8, 8 ; LMULMAX2-NEXT: ret @@ -187,7 +187,7 @@ define <2 x i64> @stepvector_v2i64() { ; CHECK-LABEL: stepvector_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call <2 x i64> @llvm.experimental.stepvector.v2i64() @@ -199,14 +199,14 @@ define <4 x i64> @stepvector_v4i64() { ; LMULMAX1-LABEL: stepvector_v4i64: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vid.v v8 ; LMULMAX1-NEXT: vadd.vi v9, v8, 2 ; LMULMAX1-NEXT: ret ; ; LMULMAX2-LABEL: stepvector_v4i64: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-NEXT: vid.v v8 ; LMULMAX2-NEXT: ret %v = call <4 x i64> @llvm.experimental.stepvector.v4i64() @@ -218,7 +218,7 @@ define <8 x i64> @stepvector_v8i64() { ; LMULMAX1-LABEL: stepvector_v8i64: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vid.v v8 ; LMULMAX1-NEXT: vadd.vi v9, v8, 2 ; LMULMAX1-NEXT: vadd.vi v10, v8, 4 @@ -227,7 +227,7 @@ ; ; LMULMAX2-LABEL: stepvector_v8i64: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-NEXT: vid.v v8 ; LMULMAX2-NEXT: vadd.vi v10, v8, 4 ; LMULMAX2-NEXT: ret @@ -240,7 +240,7 @@ define <16 x i64> @stepvector_v16i64() { ; LMULMAX1-LABEL: stepvector_v16i64: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; LMULMAX1-NEXT: vid.v v8 ; LMULMAX1-NEXT: vadd.vi v9, v8, 2 ; LMULMAX1-NEXT: vadd.vi v10, v8, 4 @@ -253,7 +253,7 @@ ; ; LMULMAX2-LABEL: stepvector_v16i64: ; LMULMAX2: # %bb.0: -; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; LMULMAX2-NEXT: vid.v v8 ; LMULMAX2-NEXT: vadd.vi v10, v8, 4 ; LMULMAX2-NEXT: vadd.vi v12, v8, 8 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll @@ -99,13 +99,13 @@ define <4 x i8> @strided_vpload_v4i8_allones_mask(i8* %ptr, i32 signext %stride, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_v4i8_allones_mask: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1 ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_v4i8_allones_mask: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; 
CHECK-RV64-NEXT: vlse8.v v8, (a0), a1 ; CHECK-RV64-NEXT: ret %a = insertelement <4 x i1> poison, i1 true, i32 0 @@ -189,13 +189,13 @@ define <8 x i16> @strided_vpload_v8i16_allones_mask(i16* %ptr, i32 signext %stride, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_v8i16_allones_mask: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1 ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_v8i16_allones_mask: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1 ; CHECK-RV64-NEXT: ret %a = insertelement <8 x i1> poison, i1 true, i32 0 @@ -261,13 +261,13 @@ define <8 x i32> @strided_vpload_v8i32_allones_mask(i32* %ptr, i32 signext %stride, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_v8i32_allones_mask: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1 ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_v8i32_allones_mask: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1 ; CHECK-RV64-NEXT: ret %a = insertelement <8 x i1> poison, i1 true, i32 0 @@ -315,13 +315,13 @@ define <4 x i64> @strided_vpload_v4i64_allones_mask(i64* %ptr, i32 signext %stride, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_v4i64_allones_mask: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1 ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_v4i64_allones_mask: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1 ; CHECK-RV64-NEXT: ret %a = insertelement <4 x i1> poison, i1 true, i32 0 @@ -369,13 +369,13 @@ define <2 x half> @strided_vpload_v2f16_allones_mask(half* %ptr, i32 signext %stride, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_v2f16_allones_mask: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1 ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_v2f16_allones_mask: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1 ; CHECK-RV64-NEXT: ret %a = insertelement <2 x i1> poison, i1 true, i32 0 @@ -477,13 +477,13 @@ define <8 x float> @strided_vpload_v8f32_allones_mask(float* %ptr, i32 signext %stride, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_v8f32_allones_mask: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1 ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_v8f32_allones_mask: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1 ; CHECK-RV64-NEXT: ret %a = insertelement <8 x i1> poison, i1 true, i32 0 @@ -531,13 +531,13 @@ define <4 x 
double> @strided_vpload_v4f64_allones_mask(double* %ptr, i32 signext %stride, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_v4f64_allones_mask: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1 ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_v4f64_allones_mask: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1 ; CHECK-RV64-NEXT: ret %a = insertelement <4 x i1> poison, i1 true, i32 0 @@ -584,13 +584,13 @@ define <3 x double> @strided_vpload_v3f64_allones_mask(double* %ptr, i32 signext %stride, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_v3f64_allones_mask: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1 ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_v3f64_allones_mask: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1 ; CHECK-RV64-NEXT: ret %one = insertelement <3 x i1> poison, i1 true, i32 0 @@ -619,7 +619,7 @@ ; CHECK-RV32-NEXT: .LBB33_4: ; CHECK-RV32-NEXT: mul a4, a2, a1 ; CHECK-RV32-NEXT: add a4, a0, a4 -; CHECK-RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-RV32-NEXT: vslidedown.vi v0, v8, 2 ; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu ; CHECK-RV32-NEXT: vlse64.v v16, (a4), a1, v0.t @@ -644,7 +644,7 @@ ; CHECK-RV64-NEXT: .LBB33_4: ; CHECK-RV64-NEXT: mul a4, a2, a1 ; CHECK-RV64-NEXT: add a4, a0, a4 -; CHECK-RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-RV64-NEXT: vslidedown.vi v0, v8, 2 ; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, mu ; CHECK-RV64-NEXT: vlse64.v v16, (a4), a1, v0.t @@ -672,9 +672,9 @@ ; CHECK-RV32-NEXT: .LBB34_4: ; CHECK-RV32-NEXT: mul a4, a2, a1 ; CHECK-RV32-NEXT: add a4, a0, a4 -; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; CHECK-RV32-NEXT: vlse64.v v16, (a4), a1 -; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1 ; CHECK-RV32-NEXT: ret ; @@ -693,9 +693,9 @@ ; CHECK-RV64-NEXT: .LBB34_4: ; CHECK-RV64-NEXT: mul a4, a2, a1 ; CHECK-RV64-NEXT: add a4, a0, a4 -; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; CHECK-RV64-NEXT: vlse64.v v16, (a4), a1 -; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1 ; CHECK-RV64-NEXT: ret %one = insertelement <32 x i1> poison, i1 true, i32 0 @@ -731,7 +731,7 @@ ; CHECK-RV32-NEXT: .LBB35_6: ; CHECK-RV32-NEXT: mul t0, a5, a2 ; CHECK-RV32-NEXT: add t0, a1, t0 -; CHECK-RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-RV32-NEXT: vslidedown.vi v0, v8, 2 ; CHECK-RV32-NEXT: vsetvli zero, a7, e64, m8, ta, mu ; CHECK-RV32-NEXT: vlse64.v v16, (t0), a2, v0.t @@ -747,20 +747,20 @@ ; CHECK-RV32-NEXT: .LBB35_10: ; CHECK-RV32-NEXT: mul a3, a3, a2 ; CHECK-RV32-NEXT: add a3, a1, a3 -; CHECK-RV32-NEXT: vsetivli zero, 4, e8, mf2, ta, mu 
+; CHECK-RV32-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; CHECK-RV32-NEXT: vslidedown.vi v0, v8, 4 ; CHECK-RV32-NEXT: vsetvli zero, a7, e64, m8, ta, mu ; CHECK-RV32-NEXT: vlse64.v v24, (a3), a2, v0.t ; CHECK-RV32-NEXT: vsetvli zero, a5, e64, m8, ta, mu ; CHECK-RV32-NEXT: vmv1r.v v0, v8 ; CHECK-RV32-NEXT: vlse64.v v8, (a1), a2, v0.t -; CHECK-RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-RV32-NEXT: vse64.v v8, (a0) ; CHECK-RV32-NEXT: addi a1, a0, 256 -; CHECK-RV32-NEXT: vsetivli zero, 1, e64, m8, ta, mu +; CHECK-RV32-NEXT: vsetivli zero, 1, e64, m8, ta, ma ; CHECK-RV32-NEXT: vse64.v v24, (a1) ; CHECK-RV32-NEXT: addi a0, a0, 128 -; CHECK-RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-RV32-NEXT: vse64.v v16, (a0) ; CHECK-RV32-NEXT: ret ; @@ -787,7 +787,7 @@ ; CHECK-RV64-NEXT: .LBB35_6: ; CHECK-RV64-NEXT: mul t0, a5, a2 ; CHECK-RV64-NEXT: add t0, a1, t0 -; CHECK-RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-RV64-NEXT: vslidedown.vi v0, v8, 2 ; CHECK-RV64-NEXT: vsetvli zero, a7, e64, m8, ta, mu ; CHECK-RV64-NEXT: vlse64.v v16, (t0), a2, v0.t @@ -803,20 +803,20 @@ ; CHECK-RV64-NEXT: .LBB35_10: ; CHECK-RV64-NEXT: mul a3, a4, a2 ; CHECK-RV64-NEXT: add a3, a1, a3 -; CHECK-RV64-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; CHECK-RV64-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; CHECK-RV64-NEXT: vslidedown.vi v0, v8, 4 ; CHECK-RV64-NEXT: vsetvli zero, a7, e64, m8, ta, mu ; CHECK-RV64-NEXT: vlse64.v v24, (a3), a2, v0.t ; CHECK-RV64-NEXT: vsetvli zero, a5, e64, m8, ta, mu ; CHECK-RV64-NEXT: vmv1r.v v0, v8 ; CHECK-RV64-NEXT: vlse64.v v8, (a1), a2, v0.t -; CHECK-RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-RV64-NEXT: vse64.v v8, (a0) ; CHECK-RV64-NEXT: addi a1, a0, 256 -; CHECK-RV64-NEXT: vsetivli zero, 1, e64, m8, ta, mu +; CHECK-RV64-NEXT: vsetivli zero, 1, e64, m8, ta, ma ; CHECK-RV64-NEXT: vse64.v v24, (a1) ; CHECK-RV64-NEXT: addi a0, a0, 128 -; CHECK-RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-RV64-NEXT: vse64.v v16, (a0) ; CHECK-RV64-NEXT: ret %v = call <33 x double> @llvm.experimental.vp.strided.load.v33f64.p0f64.i64(double* %ptr, i64 %stride, <33 x i1> %mask, i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll @@ -11,13 +11,13 @@ define void @strided_vpstore_v2i8_i8(<2 x i8> %val, i8* %ptr, i8 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_v2i8_i8: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-RV32-NEXT: vsse8.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_v2i8_i8: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-RV64-NEXT: vsse8.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.v2i8.p0i8.i8(<2 x i8> %val, i8* %ptr, i8 %stride, <2 x i1> %m, i32 %evl) @@ -29,13 +29,13 @@ define void @strided_vpstore_v2i8_i16(<2 x i8> %val, i8* %ptr, i16 signext %stride, <2 x i1> %m, i32 zeroext 
%evl) { ; CHECK-RV32-LABEL: strided_vpstore_v2i8_i16: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-RV32-NEXT: vsse8.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_v2i8_i16: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-RV64-NEXT: vsse8.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.v2i8.p0i8.i16(<2 x i8> %val, i8* %ptr, i16 %stride, <2 x i1> %m, i32 %evl) @@ -47,13 +47,13 @@ define void @strided_vpstore_v2i8_i64(<2 x i8> %val, i8* %ptr, i64 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_v2i8_i64: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a3, e8, mf8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a3, e8, mf8, ta, ma ; CHECK-RV32-NEXT: vsse8.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_v2i8_i64: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-RV64-NEXT: vsse8.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.v2i8.p0i8.i64(<2 x i8> %val, i8* %ptr, i64 %stride, <2 x i1> %m, i32 %evl) @@ -65,13 +65,13 @@ define void @strided_vpstore_v2i8(<2 x i8> %val, i8* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_v2i8: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-RV32-NEXT: vsse8.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_v2i8: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-RV64-NEXT: vsse8.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.v2i8.p0i8.i32(<2 x i8> %val, i8* %ptr, i32 %stride, <2 x i1> %m, i32 %evl) @@ -83,13 +83,13 @@ define void @strided_vpstore_v4i8(<4 x i8> %val, i8* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_v4i8: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-RV32-NEXT: vsse8.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_v4i8: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-RV64-NEXT: vsse8.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.v4i8.p0i8.i32(<4 x i8> %val, i8* %ptr, i32 %stride, <4 x i1> %m, i32 %evl) @@ -101,13 +101,13 @@ define void @strided_vpstore_v8i8(<8 x i8> %val, i8* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_v8i8: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-RV32-NEXT: vsse8.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_v8i8: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-RV64-NEXT: vsse8.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.v8i8.p0i8.i32(<8 x i8> %val, i8* 
%ptr, i32 %stride, <8 x i1> %m, i32 %evl) @@ -119,13 +119,13 @@ define void @strided_vpstore_v2i16(<2 x i16> %val, i16* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_v2i16: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-RV32-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_v2i16: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-RV64-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.v2i16.p0i16.i32(<2 x i16> %val, i16* %ptr, i32 %stride, <2 x i1> %m, i32 %evl) @@ -137,13 +137,13 @@ define void @strided_vpstore_v4i16(<4 x i16> %val, i16* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_v4i16: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-RV32-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_v4i16: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-RV64-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.v4i16.p0i16.i32(<4 x i16> %val, i16* %ptr, i32 %stride, <4 x i1> %m, i32 %evl) @@ -155,13 +155,13 @@ define void @strided_vpstore_v8i16(<8 x i16> %val, i16* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_v8i16: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-RV32-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_v8i16: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-RV64-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.v8i16.p0i16.i32(<8 x i16> %val, i16* %ptr, i32 %stride, <8 x i1> %m, i32 %evl) @@ -173,13 +173,13 @@ define void @strided_vpstore_v2i32(<2 x i32> %val, i32* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_v2i32: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-RV32-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_v2i32: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-RV64-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.v2i32.p0i32.i32(<2 x i32> %val, i32* %ptr, i32 %stride, <2 x i1> %m, i32 %evl) @@ -191,13 +191,13 @@ define void @strided_vpstore_v4i32(<4 x i32> %val, i32* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_v4i32: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-RV32-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_v4i32: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, 
e32, m1, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-RV64-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.v4i32.p0i32.i32(<4 x i32> %val, i32* %ptr, i32 %stride, <4 x i1> %m, i32 %evl) @@ -209,13 +209,13 @@ define void @strided_vpstore_v8i32(<8 x i32> %val, i32* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_v8i32: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-RV32-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_v8i32: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-RV64-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.v8i32.p0i32.i32(<8 x i32> %val, i32* %ptr, i32 %stride, <8 x i1> %m, i32 %evl) @@ -227,13 +227,13 @@ define void @strided_vpstore_v2i64(<2 x i64> %val, i64* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_v2i64: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-RV32-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_v2i64: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-RV64-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.v2i64.p0i64.i32(<2 x i64> %val, i64* %ptr, i32 %stride, <2 x i1> %m, i32 %evl) @@ -245,13 +245,13 @@ define void @strided_vpstore_v4i64(<4 x i64> %val, i64* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_v4i64: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-RV32-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_v4i64: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-RV64-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.v4i64.p0i64.i32(<4 x i64> %val, i64* %ptr, i32 %stride, <4 x i1> %m, i32 %evl) @@ -263,13 +263,13 @@ define void @strided_vpstore_v8i64(<8 x i64> %val, i64* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_v8i64: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-RV32-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_v8i64: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-RV64-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.v8i64.p0i64.i32(<8 x i64> %val, i64* %ptr, i32 %stride, <8 x i1> %m, i32 %evl) @@ -281,13 +281,13 @@ define void @strided_vpstore_v2f16(<2 x half> %val, half* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_v2f16: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; 
CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-RV32-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_v2f16: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-RV64-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.v2f16.p0f16.i32(<2 x half> %val, half* %ptr, i32 %stride, <2 x i1> %m, i32 %evl) @@ -299,13 +299,13 @@ define void @strided_vpstore_v4f16(<4 x half> %val, half* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_v4f16: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-RV32-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_v4f16: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-RV64-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.v4f16.p0f16.i32(<4 x half> %val, half* %ptr, i32 %stride, <4 x i1> %m, i32 %evl) @@ -317,13 +317,13 @@ define void @strided_vpstore_v8f16(<8 x half> %val, half* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_v8f16: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-RV32-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_v8f16: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-RV64-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.v8f16.p0f16.i32(<8 x half> %val, half* %ptr, i32 %stride, <8 x i1> %m, i32 %evl) @@ -335,13 +335,13 @@ define void @strided_vpstore_v2f32(<2 x float> %val, float* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_v2f32: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-RV32-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_v2f32: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-RV64-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.v2f32.p0f32.i32(<2 x float> %val, float* %ptr, i32 %stride, <2 x i1> %m, i32 %evl) @@ -353,13 +353,13 @@ define void @strided_vpstore_v4f32(<4 x float> %val, float* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_v4f32: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-RV32-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_v4f32: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-RV64-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.v4f32.p0f32.i32(<4 x float> %val, float* %ptr, i32 %stride, <4 x i1> %m, i32 %evl) @@ -371,13 
+371,13 @@ define void @strided_vpstore_v8f32(<8 x float> %val, float* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_v8f32: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-RV32-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_v8f32: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-RV64-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.v8f32.p0f32.i32(<8 x float> %val, float* %ptr, i32 %stride, <8 x i1> %m, i32 %evl) @@ -389,13 +389,13 @@ define void @strided_vpstore_v2f64(<2 x double> %val, double* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_v2f64: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-RV32-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_v2f64: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-RV64-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.v2f64.p0f64.i32(<2 x double> %val, double* %ptr, i32 %stride, <2 x i1> %m, i32 %evl) @@ -407,13 +407,13 @@ define void @strided_vpstore_v4f64(<4 x double> %val, double* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_v4f64: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-RV32-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_v4f64: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-RV64-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.v4f64.p0f64.i32(<4 x double> %val, double* %ptr, i32 %stride, <4 x i1> %m, i32 %evl) @@ -425,13 +425,13 @@ define void @strided_vpstore_v8f64(<8 x double> %val, double* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_v8f64: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-RV32-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_v8f64: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-RV64-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.v8f64.p0f64.i32(<8 x double> %val, double* %ptr, i32 %stride, <8 x i1> %m, i32 %evl) @@ -441,13 +441,13 @@ define void @strided_vpstore_v2i8_allones_mask(<2 x i8> %val, i8* %ptr, i32 signext %stride, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_v2i8_allones_mask: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-RV32-NEXT: vsse8.v v8, (a0), a1 ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_v2i8_allones_mask: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e8, 
mf8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-RV64-NEXT: vsse8.v v8, (a0), a1 ; CHECK-RV64-NEXT: ret %a = insertelement <2 x i1> poison, i1 true, i32 0 @@ -460,13 +460,13 @@ define void @strided_vpstore_v3f32(<3 x float> %v, float *%ptr, i32 signext %stride, <3 x i1> %mask, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_v3f32: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-RV32-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_v3f32: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-RV64-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.v3f32.p0f32.i32(<3 x float> %v, float* %ptr, i32 %stride, <3 x i1> %mask, i32 %evl) @@ -476,13 +476,13 @@ define void @strided_vpstore_v3f32_allones_mask(<3 x float> %v, float *%ptr, i32 signext %stride, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_v3f32_allones_mask: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-RV32-NEXT: vsse32.v v8, (a0), a1 ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_v3f32_allones_mask: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-RV64-NEXT: vsse32.v v8, (a0), a1 ; CHECK-RV64-NEXT: ret %one = insertelement <3 x i1> poison, i1 true, i32 0 @@ -504,7 +504,7 @@ ; CHECK-RV32-NEXT: li a3, 16 ; CHECK-RV32-NEXT: .LBB27_2: ; CHECK-RV32-NEXT: li a4, 0 -; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; CHECK-RV32-NEXT: addi a5, a2, -16 ; CHECK-RV32-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: bltu a2, a5, .LBB27_4 @@ -513,9 +513,9 @@ ; CHECK-RV32-NEXT: .LBB27_4: ; CHECK-RV32-NEXT: mul a2, a3, a1 ; CHECK-RV32-NEXT: add a0, a0, a2 -; CHECK-RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-RV32-NEXT: vslidedown.vi v0, v0, 2 -; CHECK-RV32-NEXT: vsetvli zero, a4, e64, m8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a4, e64, m8, ta, ma ; CHECK-RV32-NEXT: vsse64.v v16, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; @@ -528,7 +528,7 @@ ; CHECK-RV64-NEXT: li a3, 16 ; CHECK-RV64-NEXT: .LBB27_2: ; CHECK-RV64-NEXT: li a4, 0 -; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; CHECK-RV64-NEXT: addi a5, a2, -16 ; CHECK-RV64-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: bltu a2, a5, .LBB27_4 @@ -537,9 +537,9 @@ ; CHECK-RV64-NEXT: .LBB27_4: ; CHECK-RV64-NEXT: mul a2, a3, a1 ; CHECK-RV64-NEXT: add a0, a0, a2 -; CHECK-RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-RV64-NEXT: vslidedown.vi v0, v0, 2 -; CHECK-RV64-NEXT: vsetvli zero, a4, e64, m8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a4, e64, m8, ta, ma ; CHECK-RV64-NEXT: vsse64.v v16, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.v32f64.p0f64.i32(<32 x double> %v, double* %ptr, i32 %stride, <32 x i1> %mask, i32 %evl) @@ -556,7 +556,7 @@ ; CHECK-RV32-NEXT: li a3, 16 ; CHECK-RV32-NEXT: .LBB28_2: ; CHECK-RV32-NEXT: li a4, 0 -; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, 
ta, ma ; CHECK-RV32-NEXT: addi a5, a2, -16 ; CHECK-RV32-NEXT: vsse64.v v8, (a0), a1 ; CHECK-RV32-NEXT: bltu a2, a5, .LBB28_4 @@ -565,7 +565,7 @@ ; CHECK-RV32-NEXT: .LBB28_4: ; CHECK-RV32-NEXT: mul a2, a3, a1 ; CHECK-RV32-NEXT: add a0, a0, a2 -; CHECK-RV32-NEXT: vsetvli zero, a4, e64, m8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a4, e64, m8, ta, ma ; CHECK-RV32-NEXT: vsse64.v v16, (a0), a1 ; CHECK-RV32-NEXT: ret ; @@ -578,7 +578,7 @@ ; CHECK-RV64-NEXT: li a3, 16 ; CHECK-RV64-NEXT: .LBB28_2: ; CHECK-RV64-NEXT: li a4, 0 -; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; CHECK-RV64-NEXT: addi a5, a2, -16 ; CHECK-RV64-NEXT: vsse64.v v8, (a0), a1 ; CHECK-RV64-NEXT: bltu a2, a5, .LBB28_4 @@ -587,7 +587,7 @@ ; CHECK-RV64-NEXT: .LBB28_4: ; CHECK-RV64-NEXT: mul a2, a3, a1 ; CHECK-RV64-NEXT: add a0, a0, a2 -; CHECK-RV64-NEXT: vsetvli zero, a4, e64, m8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a4, e64, m8, ta, ma ; CHECK-RV64-NEXT: vsse64.v v16, (a0), a1 ; CHECK-RV64-NEXT: ret %one = insertelement <32 x i1> poison, i1 true, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp-mask.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp-mask.ll @@ -22,7 +22,7 @@ define <4 x half> @vuitofp_v4f16_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f16_v4i1_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 @@ -49,7 +49,7 @@ define <4 x float> @vuitofp_v4f32_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f32_v4i1_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 @@ -76,7 +76,7 @@ define <4 x double> @vuitofp_v4f64_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f64_v4i1_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll @@ -10,7 +10,7 @@ ; CHECK-LABEL: vuitofp_v4f16_v4i7: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 127 -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vand.vx v9, v8, a1 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t @@ -35,7 +35,7 @@ define <4 x half> @vuitofp_v4f16_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f16_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -58,7 +58,7 @@ define <4 x half> @vuitofp_v4f16_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f16_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, 
mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret %v = call <4 x half> @llvm.vp.uitofp.v4f16.v4i16(<4 x i16> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) @@ -81,7 +81,7 @@ define <4 x half> @vuitofp_v4f16_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f16_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.xu.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -106,9 +106,9 @@ define <4 x half> @vuitofp_v4f16_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f16_v4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.f.xu.w v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v8, v10 ; CHECK-NEXT: ret %v = call <4 x half> @llvm.vp.uitofp.v4f16.v4i64(<4 x i64> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) @@ -131,7 +131,7 @@ define <4 x float> @vuitofp_v4f32_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f32_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vzext.vf2 v9, v8 ; CHECK-NEXT: vfwcvt.f.xu.v v8, v9 ; CHECK-NEXT: ret @@ -155,7 +155,7 @@ define <4 x float> @vuitofp_v4f32_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f32_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -178,7 +178,7 @@ define <4 x float> @vuitofp_v4f32_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f32_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret %v = call <4 x float> @llvm.vp.uitofp.v4f32.v4i32(<4 x i32> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) @@ -201,7 +201,7 @@ define <4 x float> @vuitofp_v4f32_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f32_v4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.f.xu.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -225,7 +225,7 @@ define <4 x double> @vuitofp_v4f64_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f64_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vzext.vf4 v10, v8 ; CHECK-NEXT: vfwcvt.f.xu.v v8, v10 ; CHECK-NEXT: ret @@ -249,7 +249,7 @@ define <4 x double> @vuitofp_v4f64_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f64_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vzext.vf2 v10, v8 ; CHECK-NEXT: vfwcvt.f.xu.v 
v8, v10 ; CHECK-NEXT: ret @@ -273,7 +273,7 @@ define <4 x double> @vuitofp_v4f64_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f64_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -296,7 +296,7 @@ define <4 x double> @vuitofp_v4f64_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_v4f64_v4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret %v = call <4 x double> @llvm.vp.uitofp.v4f64.v4i64(<4 x i64> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl) @@ -310,7 +310,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: addi a2, a0, -16 ; CHECK-NEXT: vslidedown.vi v0, v0, 2 ; CHECK-NEXT: bltu a0, a2, .LBB25_2 @@ -341,14 +341,14 @@ ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a2, a1 ; CHECK-NEXT: .LBB26_2: -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: vfcvt.f.xu.v v16, v16 ; CHECK-NEXT: bltu a0, a1, .LBB26_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: .LBB26_4: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret %v = call <32 x double> @llvm.vp.uitofp.v32f64.v32i64(<32 x i64> %va, <32 x i1> shufflevector (<32 x i1> insertelement (<32 x i1> undef, i1 true, i32 0), <32 x i1> undef, <32 x i32> zeroinitializer), i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll @@ -7,7 +7,7 @@ define <4 x i32> @load_v4i32_align1(<4 x i32>* %ptr) { ; CHECK-LABEL: load_v4i32_align1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: ret %z = load <4 x i32>, <4 x i32>* %ptr, align 1 @@ -17,7 +17,7 @@ define <4 x i32> @load_v4i32_align2(<4 x i32>* %ptr) { ; CHECK-LABEL: load_v4i32_align2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: ret %z = load <4 x i32>, <4 x i32>* %ptr, align 2 @@ -27,7 +27,7 @@ define void @store_v4i32_align1(<4 x i32> %x, <4 x i32>* %ptr) { ; CHECK-LABEL: store_v4i32_align1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret store <4 x i32> %x, <4 x i32>* %ptr, align 1 @@ -37,7 +37,7 @@ define void @store_v4i32_align2(<4 x i32> %x, <4 x i32>* %ptr) { ; CHECK-LABEL: store_v4i32_align2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret store <4 x i32> %x, <4 x i32>* %ptr, align 2 @@ -49,7 +49,7 @@ define <2 x i16> @mgather_v2i16_align1(<2 x i16*> %ptrs, <2 x i1> %m, <2 x i16> %passthru) { ; 
RV32-LABEL: mgather_v2i16_align1: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 0, e8, mf8, ta, mu +; RV32-NEXT: vsetivli zero, 0, e8, mf8, ta, ma ; RV32-NEXT: vmv.x.s a0, v0 ; RV32-NEXT: andi a1, a0, 1 ; RV32-NEXT: bnez a1, .LBB4_3 @@ -60,18 +60,18 @@ ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; RV32-NEXT: .LBB4_3: # %cond.load -; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: lb a2, 1(a1) ; RV32-NEXT: lbu a1, 0(a1) ; RV32-NEXT: slli a2, a2, 8 ; RV32-NEXT: or a1, a2, a1 -; RV32-NEXT: vsetivli zero, 2, e16, mf4, tu, mu +; RV32-NEXT: vsetivli zero, 2, e16, mf4, tu, ma ; RV32-NEXT: vmv.s.x v9, a1 ; RV32-NEXT: andi a0, a0, 2 ; RV32-NEXT: beqz a0, .LBB4_2 ; RV32-NEXT: .LBB4_4: # %cond.load1 -; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV32-NEXT: vslidedown.vi v8, v8, 1 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: lb a1, 1(a0) @@ -79,14 +79,14 @@ ; RV32-NEXT: slli a1, a1, 8 ; RV32-NEXT: or a0, a1, a0 ; RV32-NEXT: vmv.s.x v8, a0 -; RV32-NEXT: vsetivli zero, 2, e16, mf4, tu, mu +; RV32-NEXT: vsetivli zero, 2, e16, mf4, tu, ma ; RV32-NEXT: vslideup.vi v9, v8, 1 ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v2i16_align1: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 0, e8, mf8, ta, mu +; RV64-NEXT: vsetivli zero, 0, e8, mf8, ta, ma ; RV64-NEXT: vmv.x.s a0, v0 ; RV64-NEXT: andi a1, a0, 1 ; RV64-NEXT: bnez a1, .LBB4_3 @@ -97,18 +97,18 @@ ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret ; RV64-NEXT: .LBB4_3: # %cond.load -; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV64-NEXT: vmv.x.s a1, v8 ; RV64-NEXT: lb a2, 1(a1) ; RV64-NEXT: lbu a1, 0(a1) ; RV64-NEXT: slli a2, a2, 8 ; RV64-NEXT: or a1, a2, a1 -; RV64-NEXT: vsetivli zero, 2, e16, mf4, tu, mu +; RV64-NEXT: vsetivli zero, 2, e16, mf4, tu, ma ; RV64-NEXT: vmv.s.x v9, a1 ; RV64-NEXT: andi a0, a0, 2 ; RV64-NEXT: beqz a0, .LBB4_2 ; RV64-NEXT: .LBB4_4: # %cond.load1 -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vslidedown.vi v8, v8, 1 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: lb a1, 1(a0) @@ -116,7 +116,7 @@ ; RV64-NEXT: slli a1, a1, 8 ; RV64-NEXT: or a0, a1, a0 ; RV64-NEXT: vmv.s.x v8, a0 -; RV64-NEXT: vsetivli zero, 2, e16, mf4, tu, mu +; RV64-NEXT: vsetivli zero, 2, e16, mf4, tu, ma ; RV64-NEXT: vslideup.vi v9, v8, 1 ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret @@ -129,10 +129,10 @@ define <2 x i64> @mgather_v2i64_align4(<2 x i64*> %ptrs, <2 x i1> %m, <2 x i64> %passthru) { ; RV32-LABEL: mgather_v2i64_align4: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 0, e8, mf8, ta, mu +; RV32-NEXT: vsetivli zero, 0, e8, mf8, ta, ma ; RV32-NEXT: vmv.x.s a0, v0 ; RV32-NEXT: andi a1, a0, 1 -; RV32-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV32-NEXT: vmv.v.i v10, 0 ; RV32-NEXT: bnez a1, .LBB5_3 ; RV32-NEXT: # %bb.1: # %else @@ -147,27 +147,27 @@ ; RV32-NEXT: lw a1, 0(a1) ; RV32-NEXT: vslide1up.vx v11, v10, a2 ; RV32-NEXT: vslide1up.vx v12, v11, a1 -; RV32-NEXT: vsetivli zero, 1, e64, m1, tu, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, tu, ma ; RV32-NEXT: vslideup.vi v9, v12, 0 ; RV32-NEXT: andi a0, a0, 2 ; RV32-NEXT: beqz a0, .LBB5_2 ; RV32-NEXT: .LBB5_4: # %cond.load1 -; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV32-NEXT: vslidedown.vi v8, v8, 1 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: 
lw a1, 4(a0) ; RV32-NEXT: lw a0, 0(a0) -; RV32-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; RV32-NEXT: vslide1up.vx v8, v10, a1 ; RV32-NEXT: vslide1up.vx v10, v8, a0 -; RV32-NEXT: vsetivli zero, 2, e64, m1, tu, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, tu, ma ; RV32-NEXT: vslideup.vi v9, v10, 1 ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v2i64_align4: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 0, e8, mf8, ta, mu +; RV64-NEXT: vsetivli zero, 0, e8, mf8, ta, ma ; RV64-NEXT: vmv.x.s a0, v0 ; RV64-NEXT: andi a1, a0, 1 ; RV64-NEXT: bnez a1, .LBB5_3 @@ -178,18 +178,18 @@ ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret ; RV64-NEXT: .LBB5_3: # %cond.load -; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV64-NEXT: vmv.x.s a1, v8 ; RV64-NEXT: lwu a2, 4(a1) ; RV64-NEXT: lwu a1, 0(a1) ; RV64-NEXT: slli a2, a2, 32 ; RV64-NEXT: or a1, a2, a1 -; RV64-NEXT: vsetivli zero, 2, e64, m1, tu, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, tu, ma ; RV64-NEXT: vmv.s.x v9, a1 ; RV64-NEXT: andi a0, a0, 2 ; RV64-NEXT: beqz a0, .LBB5_2 ; RV64-NEXT: .LBB5_4: # %cond.load1 -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vslidedown.vi v8, v8, 1 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: lwu a1, 4(a0) @@ -197,7 +197,7 @@ ; RV64-NEXT: slli a1, a1, 32 ; RV64-NEXT: or a0, a1, a0 ; RV64-NEXT: vmv.s.x v8, a0 -; RV64-NEXT: vsetivli zero, 2, e64, m1, tu, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, tu, ma ; RV64-NEXT: vslideup.vi v9, v8, 1 ; RV64-NEXT: vmv1r.v v8, v9 ; RV64-NEXT: ret @@ -210,7 +210,7 @@ define void @mscatter_v4i16_align1(<4 x i16> %val, <4 x i16*> %ptrs, <4 x i1> %m) { ; RV32-LABEL: mscatter_v4i16_align1: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 0, e8, mf8, ta, mu +; RV32-NEXT: vsetivli zero, 0, e8, mf8, ta, ma ; RV32-NEXT: vmv.x.s a0, v0 ; RV32-NEXT: andi a1, a0, 1 ; RV32-NEXT: bnez a1, .LBB6_5 @@ -226,9 +226,9 @@ ; RV32-NEXT: .LBB6_4: # %else6 ; RV32-NEXT: ret ; RV32-NEXT: .LBB6_5: # %cond.store -; RV32-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; RV32-NEXT: vmv.x.s a1, v8 -; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV32-NEXT: vmv.x.s a2, v9 ; RV32-NEXT: sb a1, 0(a2) ; RV32-NEXT: srli a1, a1, 8 @@ -236,10 +236,10 @@ ; RV32-NEXT: andi a1, a0, 2 ; RV32-NEXT: beqz a1, .LBB6_2 ; RV32-NEXT: .LBB6_6: # %cond.store1 -; RV32-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV32-NEXT: vslidedown.vi v10, v8, 1 ; RV32-NEXT: vmv.x.s a1, v10 -; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV32-NEXT: vslidedown.vi v10, v9, 1 ; RV32-NEXT: vmv.x.s a2, v10 ; RV32-NEXT: sb a1, 0(a2) @@ -248,10 +248,10 @@ ; RV32-NEXT: andi a1, a0, 4 ; RV32-NEXT: beqz a1, .LBB6_3 ; RV32-NEXT: .LBB6_7: # %cond.store3 -; RV32-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV32-NEXT: vslidedown.vi v10, v8, 2 ; RV32-NEXT: vmv.x.s a1, v10 -; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV32-NEXT: vslidedown.vi v10, v9, 2 ; RV32-NEXT: vmv.x.s a2, v10 ; RV32-NEXT: sb a1, 0(a2) @@ -260,10 +260,10 @@ ; RV32-NEXT: andi a0, a0, 8 ; RV32-NEXT: beqz a0, .LBB6_4 ; RV32-NEXT: .LBB6_8: # %cond.store5 -; RV32-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 1, 
e16, mf2, ta, ma ; RV32-NEXT: vslidedown.vi v8, v8, 3 ; RV32-NEXT: vmv.x.s a0, v8 -; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV32-NEXT: vslidedown.vi v8, v9, 3 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: sb a0, 0(a1) @@ -273,7 +273,7 @@ ; ; RV64-LABEL: mscatter_v4i16_align1: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 0, e8, mf8, ta, mu +; RV64-NEXT: vsetivli zero, 0, e8, mf8, ta, ma ; RV64-NEXT: vmv.x.s a0, v0 ; RV64-NEXT: andi a1, a0, 1 ; RV64-NEXT: bnez a1, .LBB6_5 @@ -289,9 +289,9 @@ ; RV64-NEXT: .LBB6_4: # %else6 ; RV64-NEXT: ret ; RV64-NEXT: .LBB6_5: # %cond.store -; RV64-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; RV64-NEXT: vmv.x.s a1, v8 -; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; RV64-NEXT: vmv.x.s a2, v10 ; RV64-NEXT: sb a1, 0(a2) ; RV64-NEXT: srli a1, a1, 8 @@ -299,10 +299,10 @@ ; RV64-NEXT: andi a1, a0, 2 ; RV64-NEXT: beqz a1, .LBB6_2 ; RV64-NEXT: .LBB6_6: # %cond.store1 -; RV64-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64-NEXT: vslidedown.vi v9, v8, 1 ; RV64-NEXT: vmv.x.s a1, v9 -; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; RV64-NEXT: vslidedown.vi v12, v10, 1 ; RV64-NEXT: vmv.x.s a2, v12 ; RV64-NEXT: sb a1, 0(a2) @@ -311,10 +311,10 @@ ; RV64-NEXT: andi a1, a0, 4 ; RV64-NEXT: beqz a1, .LBB6_3 ; RV64-NEXT: .LBB6_7: # %cond.store3 -; RV64-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64-NEXT: vslidedown.vi v9, v8, 2 ; RV64-NEXT: vmv.x.s a1, v9 -; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; RV64-NEXT: vslidedown.vi v12, v10, 2 ; RV64-NEXT: vmv.x.s a2, v12 ; RV64-NEXT: sb a1, 0(a2) @@ -323,10 +323,10 @@ ; RV64-NEXT: andi a0, a0, 8 ; RV64-NEXT: beqz a0, .LBB6_4 ; RV64-NEXT: .LBB6_8: # %cond.store5 -; RV64-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; RV64-NEXT: vslidedown.vi v8, v8, 3 ; RV64-NEXT: vmv.x.s a0, v8 -; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; RV64-NEXT: vslidedown.vi v8, v10, 3 ; RV64-NEXT: vmv.x.s a1, v8 ; RV64-NEXT: sb a0, 0(a1) @@ -342,7 +342,7 @@ define void @mscatter_v2i32_align2(<2 x i32> %val, <2 x i32*> %ptrs, <2 x i1> %m) { ; RV32-LABEL: mscatter_v2i32_align2: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 0, e8, mf8, ta, mu +; RV32-NEXT: vsetivli zero, 0, e8, mf8, ta, ma ; RV32-NEXT: vmv.x.s a0, v0 ; RV32-NEXT: andi a1, a0, 1 ; RV32-NEXT: bnez a1, .LBB7_3 @@ -352,7 +352,7 @@ ; RV32-NEXT: .LBB7_2: # %else2 ; RV32-NEXT: ret ; RV32-NEXT: .LBB7_3: # %cond.store -; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: vmv.x.s a2, v9 ; RV32-NEXT: sh a1, 0(a2) @@ -361,7 +361,7 @@ ; RV32-NEXT: andi a0, a0, 2 ; RV32-NEXT: beqz a0, .LBB7_2 ; RV32-NEXT: .LBB7_4: # %cond.store1 -; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV32-NEXT: vslidedown.vi v8, v8, 1 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: vslidedown.vi v8, v9, 1 @@ -373,7 +373,7 @@ ; ; RV64-LABEL: mscatter_v2i32_align2: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 0, e8, mf8, ta, mu +; RV64-NEXT: vsetivli zero, 0, e8, mf8, ta, ma ; RV64-NEXT: vmv.x.s a0, v0 ; RV64-NEXT: andi a1, a0, 1 ; RV64-NEXT: bnez 
a1, .LBB7_3 @@ -383,9 +383,9 @@ ; RV64-NEXT: .LBB7_2: # %else2 ; RV64-NEXT: ret ; RV64-NEXT: .LBB7_3: # %cond.store -; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; RV64-NEXT: vmv.x.s a1, v8 -; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV64-NEXT: vmv.x.s a2, v9 ; RV64-NEXT: sh a1, 0(a2) ; RV64-NEXT: srli a1, a1, 16 @@ -393,10 +393,10 @@ ; RV64-NEXT: andi a0, a0, 2 ; RV64-NEXT: beqz a0, .LBB7_2 ; RV64-NEXT: .LBB7_4: # %cond.store1 -; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV64-NEXT: vslidedown.vi v8, v8, 1 ; RV64-NEXT: vmv.x.s a0, v8 -; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV64-NEXT: vslidedown.vi v8, v9, 1 ; RV64-NEXT: vmv.x.s a1, v8 ; RV64-NEXT: sh a0, 0(a1) @@ -412,9 +412,9 @@ define void @masked_load_v2i32_align1(<2 x i32>* %a, <2 x i32> %m, <2 x i32>* %res_ptr) nounwind { ; RV32-LABEL: masked_load_v2i32_align1: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32-NEXT: vmseq.vi v8, v8, 0 -; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; RV32-NEXT: vmv.x.s a2, v8 ; RV32-NEXT: andi a3, a2, 1 ; RV32-NEXT: beqz a3, .LBB8_2 @@ -429,13 +429,13 @@ ; RV32-NEXT: or a4, a4, a6 ; RV32-NEXT: slli a4, a4, 16 ; RV32-NEXT: or a3, a4, a3 -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32-NEXT: vmv.v.x v8, a3 ; RV32-NEXT: andi a2, a2, 2 ; RV32-NEXT: bnez a2, .LBB8_3 ; RV32-NEXT: j .LBB8_4 ; RV32-NEXT: .LBB8_2: -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32-NEXT: vmv.v.i v8, 0 ; RV32-NEXT: andi a2, a2, 2 ; RV32-NEXT: beqz a2, .LBB8_4 @@ -451,18 +451,18 @@ ; RV32-NEXT: slli a0, a0, 16 ; RV32-NEXT: or a0, a0, a2 ; RV32-NEXT: vmv.s.x v9, a0 -; RV32-NEXT: vsetvli zero, zero, e32, mf2, tu, mu +; RV32-NEXT: vsetvli zero, zero, e32, mf2, tu, ma ; RV32-NEXT: vslideup.vi v8, v9, 1 ; RV32-NEXT: .LBB8_4: # %else2 -; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; RV32-NEXT: vse32.v v8, (a1) ; RV32-NEXT: ret ; ; RV64-LABEL: masked_load_v2i32_align1: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV64-NEXT: vmseq.vi v8, v8, 0 -; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; RV64-NEXT: vmv.x.s a2, v8 ; RV64-NEXT: andi a3, a2, 1 ; RV64-NEXT: beqz a3, .LBB8_2 @@ -477,13 +477,13 @@ ; RV64-NEXT: or a4, a4, a6 ; RV64-NEXT: slli a4, a4, 16 ; RV64-NEXT: or a3, a4, a3 -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV64-NEXT: vmv.v.x v8, a3 ; RV64-NEXT: andi a2, a2, 2 ; RV64-NEXT: bnez a2, .LBB8_3 ; RV64-NEXT: j .LBB8_4 ; RV64-NEXT: .LBB8_2: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV64-NEXT: vmv.v.i v8, 0 ; RV64-NEXT: andi a2, a2, 2 ; RV64-NEXT: beqz a2, .LBB8_4 @@ -499,10 +499,10 @@ ; RV64-NEXT: slli a0, a0, 16 ; RV64-NEXT: or a0, a0, a2 ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, zero, e32, mf2, tu, mu +; RV64-NEXT: vsetvli zero, zero, e32, mf2, tu, ma ; RV64-NEXT: vslideup.vi v8, v9, 1 ; RV64-NEXT: .LBB8_4: # %else2 -; RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; 
RV64-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; RV64-NEXT: vse32.v v8, (a1) ; RV64-NEXT: ret %mask = icmp eq <2 x i32> %m, zeroinitializer @@ -516,9 +516,9 @@ define void @masked_store_v2i32_align2(<2 x i32> %val, <2 x i32>* %a, <2 x i32> %m) nounwind { ; CHECK-LABEL: masked_store_v2i32_align2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vmseq.vi v9, v9, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.x.s a1, v9 ; CHECK-NEXT: andi a2, a1, 1 ; CHECK-NEXT: bnez a2, .LBB9_3 @@ -528,7 +528,7 @@ ; CHECK-NEXT: .LBB9_2: # %else2 ; CHECK-NEXT: ret ; CHECK-NEXT: .LBB9_3: # %cond.store -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmv.x.s a2, v8 ; CHECK-NEXT: sh a2, 0(a0) ; CHECK-NEXT: srli a2, a2, 16 @@ -536,7 +536,7 @@ ; CHECK-NEXT: andi a1, a1, 2 ; CHECK-NEXT: beqz a1, .LBB9_2 ; CHECK-NEXT: .LBB9_4: # %cond.store1 -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 1 ; CHECK-NEXT: vmv.x.s a1, v8 ; CHECK-NEXT: sh a1, 4(a0) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp-mask.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp-mask.ll @@ -9,7 +9,7 @@ define <2 x i1> @vadd_vv_v2i1(<2 x i1> %va, <2 x i1> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <2 x i1> @llvm.vp.add.v2i1(<2 x i1> %va, <2 x i1> %b, <2 x i1> %m, i32 %evl) @@ -21,7 +21,7 @@ define <4 x i1> @vadd_vv_v4i1(<4 x i1> %va, <4 x i1> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <4 x i1> @llvm.vp.add.v4i1(<4 x i1> %va, <4 x i1> %b, <4 x i1> %m, i32 %evl) @@ -33,7 +33,7 @@ define <8 x i1> @vadd_vv_v8i1(<8 x i1> %va, <8 x i1> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <8 x i1> @llvm.vp.add.v8i1(<8 x i1> %va, <8 x i1> %b, <8 x i1> %m, i32 %evl) @@ -45,7 +45,7 @@ define <16 x i1> @vadd_vv_v16i1(<16 x i1> %va, <16 x i1> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <16 x i1> @llvm.vp.add.v16i1(<16 x i1> %va, <16 x i1> %b, <16 x i1> %m, i32 %evl) @@ -57,7 +57,7 @@ define <32 x i1> @vadd_vv_v32i1(<32 x i1> %va, <32 x i1> %b, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <32 x i1> @llvm.vp.add.v32i1(<32 x i1> %va, <32 x i1> %b, <32 x i1> %m, i32 %evl) @@ -69,7 +69,7 @@ define <64 x i1> @vadd_vv_v64i1(<64 x i1> %va, <64 x 
i1> %b, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <64 x i1> @llvm.vp.add.v64i1(<64 x i1> %va, <64 x i1> %b, <64 x i1> %m, i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll @@ -31,7 +31,7 @@ define <2 x i8> @vadd_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -55,7 +55,7 @@ define <2 x i8> @vadd_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_v2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0 @@ -81,7 +81,7 @@ define <2 x i8> @vadd_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 -1, i32 0 @@ -107,7 +107,7 @@ define <4 x i8> @vadd_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -143,7 +143,7 @@ define <4 x i8> @vadd_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0 @@ -169,7 +169,7 @@ define <4 x i8> @vadd_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 -1, i32 0 @@ -195,7 +195,7 @@ define <5 x i8> @vadd_vv_v5i8_unmasked(<5 x i8> %va, <5 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v5i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <5 x i1> poison, i1 true, i32 0 @@ -219,7 +219,7 @@ define <5 x i8> @vadd_vx_v5i8_unmasked(<5 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_v5i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <5 x i8> poison, i8 %b, i32 0 @@ -245,7 +245,7 @@ define <5 x i8> @vadd_vi_v5i8_unmasked(<5 x i8> %va, i32 zeroext %evl) { ; 
CHECK-LABEL: vadd_vi_v5i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement <5 x i8> poison, i8 -1, i32 0 @@ -271,7 +271,7 @@ define <8 x i8> @vadd_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -295,7 +295,7 @@ define <8 x i8> @vadd_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_v8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 @@ -321,7 +321,7 @@ define <8 x i8> @vadd_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 -1, i32 0 @@ -347,7 +347,7 @@ define <16 x i8> @vadd_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -371,7 +371,7 @@ define <16 x i8> @vadd_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_v16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0 @@ -397,7 +397,7 @@ define <16 x i8> @vadd_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 -1, i32 0 @@ -414,7 +414,7 @@ ; CHECK-LABEL: vadd_vi_v258i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 128 -; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma ; CHECK-NEXT: vlm.v v25, (a0) ; CHECK-NEXT: addi a3, a1, -128 ; CHECK-NEXT: vmv1r.v v24, v0 @@ -449,14 +449,14 @@ ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a2, a1 ; CHECK-NEXT: .LBB33_2: -; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma ; CHECK-NEXT: li a1, 128 ; CHECK-NEXT: vadd.vi v16, v16, -1 ; CHECK-NEXT: bltu a0, a1, .LBB33_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: li a0, 128 ; CHECK-NEXT: .LBB33_4: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0 @@ -516,7 +516,7 @@ define <2 x i16> @vadd_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: 
vadd.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -540,7 +540,7 @@ define <2 x i16> @vadd_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_v2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0 @@ -566,7 +566,7 @@ define <2 x i16> @vadd_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 -1, i32 0 @@ -592,7 +592,7 @@ define <4 x i16> @vadd_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -616,7 +616,7 @@ define <4 x i16> @vadd_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0 @@ -642,7 +642,7 @@ define <4 x i16> @vadd_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 -1, i32 0 @@ -668,7 +668,7 @@ define <8 x i16> @vadd_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -692,7 +692,7 @@ define <8 x i16> @vadd_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_v8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0 @@ -718,7 +718,7 @@ define <8 x i16> @vadd_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 -1, i32 0 @@ -744,7 +744,7 @@ define <16 x i16> @vadd_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -768,7 +768,7 @@ define <16 x i16> @vadd_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_v16i16_unmasked: ; 
CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0 @@ -794,7 +794,7 @@ define <16 x i16> @vadd_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 -1, i32 0 @@ -820,7 +820,7 @@ define <2 x i32> @vadd_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -844,7 +844,7 @@ define <2 x i32> @vadd_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_v2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0 @@ -870,7 +870,7 @@ define <2 x i32> @vadd_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 -1, i32 0 @@ -896,7 +896,7 @@ define <4 x i32> @vadd_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -920,7 +920,7 @@ define <4 x i32> @vadd_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0 @@ -946,7 +946,7 @@ define <4 x i32> @vadd_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 -1, i32 0 @@ -972,7 +972,7 @@ define <8 x i32> @vadd_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -996,7 +996,7 @@ define <8 x i32> @vadd_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_v8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 @@ -1022,7 +1022,7 @@ 
define <8 x i32> @vadd_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 -1, i32 0 @@ -1048,7 +1048,7 @@ define <16 x i32> @vadd_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -1072,7 +1072,7 @@ define <16 x i32> @vadd_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_v16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0 @@ -1098,7 +1098,7 @@ define <16 x i32> @vadd_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 -1, i32 0 @@ -1124,7 +1124,7 @@ define <2 x i64> @vadd_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -1141,7 +1141,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v9, v0.t @@ -1167,16 +1167,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vadd.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vx_v2i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vadd.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0 @@ -1202,7 +1202,7 @@ define <2 x i64> @vadd_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 -1, i32 0 @@ -1228,7 +1228,7 @@ define <4 x i64> @vadd_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, 
i32 0 @@ -1245,7 +1245,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v10, v0.t @@ -1271,16 +1271,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vadd.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vx_v4i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vadd.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0 @@ -1306,7 +1306,7 @@ define <4 x i64> @vadd_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 -1, i32 0 @@ -1332,7 +1332,7 @@ define <8 x i64> @vadd_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -1349,7 +1349,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v12, v0.t @@ -1375,16 +1375,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vadd.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vx_v8i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vadd.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 @@ -1410,7 +1410,7 @@ define <8 x i64> @vadd_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 -1, i32 0 @@ -1436,7 +1436,7 @@ define <16 x i64> @vadd_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_v16i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -1453,7 +1453,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; 
RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v16, v0.t @@ -1479,16 +1479,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vadd.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vx_v16i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vadd.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0 @@ -1514,7 +1514,7 @@ define <16 x i64> @vadd_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_v16i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 -1, i32 0 @@ -1534,10 +1534,10 @@ ; RV32: # %bb.0: ; RV32-NEXT: vmv1r.v v1, v0 ; RV32-NEXT: li a1, 0 -; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vi v0, v0, 2 ; RV32-NEXT: li a2, 32 -; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: addi a2, a0, -16 ; RV32-NEXT: vmv.v.i v24, -1 ; RV32-NEXT: bltu a0, a2, .LBB108_2 @@ -1560,7 +1560,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v24, v0 ; RV64-NEXT: li a1, 0 -; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64-NEXT: addi a2, a0, -16 ; RV64-NEXT: vslidedown.vi v0, v0, 2 ; RV64-NEXT: bltu a0, a2, .LBB108_2 @@ -1589,21 +1589,21 @@ ; RV32: # %bb.0: ; RV32-NEXT: li a1, 0 ; RV32-NEXT: li a2, 32 -; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: addi a2, a0, -16 ; RV32-NEXT: vmv.v.i v24, -1 ; RV32-NEXT: bltu a0, a2, .LBB109_2 ; RV32-NEXT: # %bb.1: ; RV32-NEXT: mv a1, a2 ; RV32-NEXT: .LBB109_2: -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: li a1, 16 ; RV32-NEXT: vadd.vv v16, v16, v24 ; RV32-NEXT: bltu a0, a1, .LBB109_4 ; RV32-NEXT: # %bb.3: ; RV32-NEXT: li a0, 16 ; RV32-NEXT: .LBB109_4: -; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vadd.vv v8, v8, v24 ; RV32-NEXT: ret ; @@ -1615,14 +1615,14 @@ ; RV64-NEXT: # %bb.1: ; RV64-NEXT: mv a2, a1 ; RV64-NEXT: .LBB109_2: -; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV64-NEXT: li a1, 16 ; RV64-NEXT: vadd.vi v16, v16, -1 ; RV64-NEXT: bltu a0, a1, .LBB109_4 ; RV64-NEXT: # %bb.3: ; RV64-NEXT: li a0, 16 ; RV64-NEXT: .LBB109_4: -; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV64-NEXT: vadd.vi v8, v8, -1 ; RV64-NEXT: ret %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0 @@ -1639,7 +1639,7 @@ ; RV32-LABEL: vadd_vx_v32i64_evl12: ; RV32: # %bb.0: ; RV32-NEXT: li a0, 32 -; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; RV32-NEXT: vmv.v.i v16, -1 ; 
RV32-NEXT: vsetivli zero, 12, e64, m8, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v16, v0.t @@ -1659,10 +1659,10 @@ define <32 x i64> @vadd_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) { ; RV32-LABEL: vadd_vx_v32i64_evl27: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vi v1, v0, 2 ; RV32-NEXT: li a0, 32 -; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; RV32-NEXT: vmv.v.i v24, -1 ; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v24, v0.t @@ -1673,7 +1673,7 @@ ; ; RV64-LABEL: vadd_vx_v32i64_evl27: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64-NEXT: vslidedown.vi v24, v0, 2 ; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu ; RV64-NEXT: vadd.vi v8, v8, -1, v0.t diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll @@ -31,7 +31,7 @@ define <2 x i8> @vand_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -67,7 +67,7 @@ define <2 x i8> @vand_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_v2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0 @@ -81,7 +81,7 @@ define <2 x i8> @vand_vx_v2i8_unmasked_commute(<2 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_v2i8_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0 @@ -107,7 +107,7 @@ define <2 x i8> @vand_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_v2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 4, i32 0 @@ -133,7 +133,7 @@ define <4 x i8> @vand_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -157,7 +157,7 @@ define <4 x i8> @vand_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0 @@ -183,7 +183,7 @@ define <4 x i8> @vand_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, 
ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 4, i32 0 @@ -209,7 +209,7 @@ define <8 x i8> @vand_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -233,7 +233,7 @@ define <8 x i8> @vand_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_v8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 @@ -259,7 +259,7 @@ define <8 x i8> @vand_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_v8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 4, i32 0 @@ -285,7 +285,7 @@ define <16 x i8> @vand_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -309,7 +309,7 @@ define <16 x i8> @vand_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_v16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0 @@ -335,7 +335,7 @@ define <16 x i8> @vand_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_v16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 4, i32 0 @@ -361,7 +361,7 @@ define <2 x i16> @vand_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -385,7 +385,7 @@ define <2 x i16> @vand_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_v2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0 @@ -411,7 +411,7 @@ define <2 x i16> @vand_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_v2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 4, i32 0 @@ -437,7 +437,7 @@ define <4 x i16> @vand_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v4i16_unmasked: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -461,7 +461,7 @@ define <4 x i16> @vand_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0 @@ -487,7 +487,7 @@ define <4 x i16> @vand_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 4, i32 0 @@ -513,7 +513,7 @@ define <8 x i16> @vand_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -537,7 +537,7 @@ define <8 x i16> @vand_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_v8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0 @@ -563,7 +563,7 @@ define <8 x i16> @vand_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_v8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 4, i32 0 @@ -589,7 +589,7 @@ define <16 x i16> @vand_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -613,7 +613,7 @@ define <16 x i16> @vand_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_v16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0 @@ -639,7 +639,7 @@ define <16 x i16> @vand_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_v16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 4, i32 0 @@ -665,7 +665,7 @@ define <2 x i32> @vand_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -689,7 +689,7 @@ define <2 x i32> 
@vand_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_v2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0 @@ -715,7 +715,7 @@ define <2 x i32> @vand_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_v2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 4, i32 0 @@ -741,7 +741,7 @@ define <4 x i32> @vand_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -765,7 +765,7 @@ define <4 x i32> @vand_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0 @@ -791,7 +791,7 @@ define <4 x i32> @vand_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 4, i32 0 @@ -817,7 +817,7 @@ define <8 x i32> @vand_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -841,7 +841,7 @@ define <8 x i32> @vand_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_v8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 @@ -867,7 +867,7 @@ define <8 x i32> @vand_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_v8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 4, i32 0 @@ -893,7 +893,7 @@ define <16 x i32> @vand_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -917,7 +917,7 @@ define <16 x i32> @vand_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_v16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vand.vx v8, 
v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0 @@ -943,7 +943,7 @@ define <16 x i32> @vand_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_v16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 4, i32 0 @@ -969,7 +969,7 @@ define <2 x i64> @vand_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -986,7 +986,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vand.vv v8, v8, v9, v0.t @@ -1012,16 +1012,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vand.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_v2i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vand.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0 @@ -1047,7 +1047,7 @@ define <2 x i64> @vand_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_v2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 4, i32 0 @@ -1073,7 +1073,7 @@ define <4 x i64> @vand_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -1090,7 +1090,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vand.vv v8, v8, v10, v0.t @@ -1116,16 +1116,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vand.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_v4i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vand.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0 @@ -1151,7 
+1151,7 @@ define <4 x i64> @vand_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_v4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 4, i32 0 @@ -1177,7 +1177,7 @@ define <8 x i64> @vand_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -1194,7 +1194,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vand.vv v8, v8, v12, v0.t @@ -1220,16 +1220,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vand.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_v8i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vand.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 @@ -1255,7 +1255,7 @@ define <8 x i64> @vand_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_v8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 4, i32 0 @@ -1281,7 +1281,7 @@ define <11 x i64> @vand_vv_v11i64_unmasked(<11 x i64> %va, <11 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v11i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement <11 x i1> poison, i1 true, i32 0 @@ -1295,13 +1295,13 @@ ; RV32: # %bb.0: ; RV32-NEXT: vmv1r.v v16, v0 ; RV32-NEXT: li a3, 32 -; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; RV32-NEXT: vmv.v.x v24, a1 ; RV32-NEXT: lui a1, 341 ; RV32-NEXT: addi a1, a1, 1365 -; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV32-NEXT: vmv.s.x v0, a1 -; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; RV32-NEXT: vmerge.vxm v24, v24, a0, v0 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vmv1r.v v0, v16 @@ -1323,21 +1323,21 @@ ; RV32-LABEL: vand_vx_v11i64_unmasked: ; RV32: # %bb.0: ; RV32-NEXT: li a3, 32 -; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; RV32-NEXT: vmv.v.x v16, a1 ; RV32-NEXT: lui a1, 341 ; RV32-NEXT: addi a1, a1, 1365 -; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV32-NEXT: vmv.s.x v0, a1 -; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; RV32-NEXT: vsetvli 
zero, a3, e32, m8, ta, ma ; RV32-NEXT: vmerge.vxm v16, v16, a0, v0 -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vand.vv v8, v8, v16 ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_v11i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vand.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <11 x i64> poison, i64 %b, i32 0 @@ -1363,7 +1363,7 @@ define <11 x i64> @vand_vi_v11i64_unmasked(<11 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_v11i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement <11 x i64> poison, i64 4, i32 0 @@ -1389,7 +1389,7 @@ define <16 x i64> @vand_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_v16i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -1406,7 +1406,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vand.vv v8, v8, v16, v0.t @@ -1432,16 +1432,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vand.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_v16i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vand.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0 @@ -1467,7 +1467,7 @@ define <16 x i64> @vand_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_v16i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 4, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll @@ -19,7 +19,7 @@ define <2 x half> @vfsgnj_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -43,7 +43,7 @@ define <4 x half> @vfsgnj_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 ; CHECK-NEXT: ret 
%head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -67,7 +67,7 @@ define <8 x half> @vfsgnj_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -91,7 +91,7 @@ define <16 x half> @vfsgnj_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -115,7 +115,7 @@ define <2 x float> @vfsgnj_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -139,7 +139,7 @@ define <4 x float> @vfsgnj_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -163,7 +163,7 @@ define <8 x float> @vfsgnj_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -187,7 +187,7 @@ define <16 x float> @vfsgnj_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -211,7 +211,7 @@ define <2 x double> @vfsgnj_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -235,7 +235,7 @@ define <4 x double> @vfsgnj_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -259,7 +259,7 @@ define <8 x double> @vfsgnj_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -283,7 +283,7 @@ define <15 x 
double> @vfsgnj_vv_v15f64_unmasked(<15 x double> %va, <15 x double> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v15f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement <15 x i1> poison, i1 true, i32 0 @@ -307,7 +307,7 @@ define <16 x double> @vfsgnj_vv_v16f64_unmasked(<16 x double> %va, <16 x double> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_v16f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -328,10 +328,10 @@ ; CHECK-NEXT: mul a1, a1, a3 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v0, v0, 2 ; CHECK-NEXT: addi a1, a0, 128 -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v24, (a1) ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 @@ -388,7 +388,7 @@ ; CHECK-LABEL: vfsgnj_vv_v32f64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: addi a1, a0, 128 -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v24, (a1) ; CHECK-NEXT: addi a3, a2, -16 ; CHECK-NEXT: li a1, 0 @@ -397,14 +397,14 @@ ; CHECK-NEXT: mv a1, a3 ; CHECK-NEXT: .LBB27_2: ; CHECK-NEXT: vle64.v v0, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vfsgnj.vv v16, v16, v24 ; CHECK-NEXT: bltu a2, a0, .LBB27_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: li a2, 16 ; CHECK-NEXT: .LBB27_4: -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v0 ; CHECK-NEXT: ret %head = insertelement <32 x i1> poison, i1 true, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll @@ -9,7 +9,7 @@ define <8 x i7> @vdiv_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v8i7: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vadd.vv v9, v9, v9 ; CHECK-NEXT: vsra.vi v9, v9, 1 ; CHECK-NEXT: vadd.vv v8, v8, v8 @@ -36,7 +36,7 @@ define <2 x i8> @vdiv_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -60,7 +60,7 @@ define <2 x i8> @vdiv_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_v2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0 @@ -86,7 +86,7 @@ define <4 x i8> @vdiv_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v4i8_unmasked: ; CHECK: # 
%bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -110,7 +110,7 @@ define <4 x i8> @vdiv_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0 @@ -148,7 +148,7 @@ define <8 x i8> @vdiv_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -172,7 +172,7 @@ define <8 x i8> @vdiv_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_v8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 @@ -198,7 +198,7 @@ define <16 x i8> @vdiv_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -222,7 +222,7 @@ define <16 x i8> @vdiv_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_v16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0 @@ -248,7 +248,7 @@ define <2 x i16> @vdiv_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -272,7 +272,7 @@ define <2 x i16> @vdiv_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_v2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0 @@ -298,7 +298,7 @@ define <4 x i16> @vdiv_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -322,7 +322,7 @@ define <4 x i16> @vdiv_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0 @@ -348,7 +348,7 @@ define <8 x i16> 
@vdiv_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -372,7 +372,7 @@ define <8 x i16> @vdiv_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_v8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0 @@ -398,7 +398,7 @@ define <16 x i16> @vdiv_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -422,7 +422,7 @@ define <16 x i16> @vdiv_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_v16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0 @@ -448,7 +448,7 @@ define <2 x i32> @vdiv_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -472,7 +472,7 @@ define <2 x i32> @vdiv_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_v2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0 @@ -498,7 +498,7 @@ define <4 x i32> @vdiv_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -522,7 +522,7 @@ define <4 x i32> @vdiv_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0 @@ -548,7 +548,7 @@ define <8 x i32> @vdiv_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -572,7 +572,7 @@ define <8 x i32> @vdiv_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_v8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, 
ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 @@ -598,7 +598,7 @@ define <16 x i32> @vdiv_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -622,7 +622,7 @@ define <16 x i32> @vdiv_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_v16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0 @@ -648,7 +648,7 @@ define <2 x i64> @vdiv_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -665,7 +665,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v9, v0.t @@ -691,16 +691,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vdiv.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vx_v2i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vdiv.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0 @@ -726,7 +726,7 @@ define <4 x i64> @vdiv_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -743,7 +743,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v10, v0.t @@ -769,16 +769,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vdiv.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vx_v4i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vdiv.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = 
insertelement <4 x i64> poison, i64 %b, i32 0 @@ -804,7 +804,7 @@ define <8 x i64> @vdiv_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -821,7 +821,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v12, v0.t @@ -847,16 +847,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vdiv.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vx_v8i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vdiv.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 @@ -882,7 +882,7 @@ define <16 x i64> @vdiv_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_v16i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -899,7 +899,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v16, v0.t @@ -925,16 +925,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vdiv.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vx_v16i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vdiv.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll @@ -10,7 +10,7 @@ ; CHECK-LABEL: vdivu_vv_v8i7: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 127 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vand.vx v9, v9, a1 ; CHECK-NEXT: vand.vx v8, v8, a1 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -35,7 +35,7 @@ define <2 x i8> @vdivu_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; 
CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -59,7 +59,7 @@ define <2 x i8> @vdivu_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_v2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0 @@ -85,7 +85,7 @@ define <4 x i8> @vdivu_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -109,7 +109,7 @@ define <4 x i8> @vdivu_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0 @@ -147,7 +147,7 @@ define <8 x i8> @vdivu_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -171,7 +171,7 @@ define <8 x i8> @vdivu_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_v8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 @@ -197,7 +197,7 @@ define <16 x i8> @vdivu_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -221,7 +221,7 @@ define <16 x i8> @vdivu_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_v16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0 @@ -247,7 +247,7 @@ define <2 x i16> @vdivu_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -271,7 +271,7 @@ define <2 x i16> @vdivu_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_v2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0 @@ -297,7 +297,7 @@ define <4 x i16> @vdivu_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, 
i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -321,7 +321,7 @@ define <4 x i16> @vdivu_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0 @@ -347,7 +347,7 @@ define <8 x i16> @vdivu_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -371,7 +371,7 @@ define <8 x i16> @vdivu_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_v8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0 @@ -397,7 +397,7 @@ define <16 x i16> @vdivu_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -421,7 +421,7 @@ define <16 x i16> @vdivu_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_v16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0 @@ -447,7 +447,7 @@ define <2 x i32> @vdivu_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -471,7 +471,7 @@ define <2 x i32> @vdivu_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_v2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0 @@ -497,7 +497,7 @@ define <4 x i32> @vdivu_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -521,7 +521,7 @@ define <4 x i32> @vdivu_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: 
vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0 @@ -547,7 +547,7 @@ define <8 x i32> @vdivu_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -571,7 +571,7 @@ define <8 x i32> @vdivu_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_v8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 @@ -597,7 +597,7 @@ define <16 x i32> @vdivu_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -621,7 +621,7 @@ define <16 x i32> @vdivu_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_v16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0 @@ -647,7 +647,7 @@ define <2 x i64> @vdivu_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -664,7 +664,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v9, v0.t @@ -690,16 +690,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vdivu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vx_v2i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vdivu.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0 @@ -725,7 +725,7 @@ define <4 x i64> @vdivu_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -742,7 +742,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: 
vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v10, v0.t @@ -768,16 +768,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vdivu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vx_v4i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vdivu.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0 @@ -803,7 +803,7 @@ define <8 x i64> @vdivu_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -820,7 +820,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v12, v0.t @@ -846,16 +846,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vdivu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vx_v8i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vdivu.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 @@ -881,7 +881,7 @@ define <16 x i64> @vdivu_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_v16i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -898,7 +898,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v16, v0.t @@ -924,16 +924,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vdivu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vx_v16i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vdivu.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0 diff --git 
a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll @@ -19,7 +19,7 @@ define <2 x half> @vfabs_vv_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_v2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -43,7 +43,7 @@ define <4 x half> @vfabs_vv_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_v4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -67,7 +67,7 @@ define <8 x half> @vfabs_vv_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_v8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -91,7 +91,7 @@ define <16 x half> @vfabs_vv_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_v16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -115,7 +115,7 @@ define <2 x float> @vfabs_vv_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_v2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -139,7 +139,7 @@ define <4 x float> @vfabs_vv_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_v4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -163,7 +163,7 @@ define <8 x float> @vfabs_vv_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_v8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -187,7 +187,7 @@ define <16 x float> @vfabs_vv_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_v16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -211,7 +211,7 @@ define <2 x double> @vfabs_vv_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_v2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -235,7 +235,7 @@ define <4 x double> @vfabs_vv_v4f64_unmasked(<4 x double> %va, i32 zeroext 
%evl) { ; CHECK-LABEL: vfabs_vv_v4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -259,7 +259,7 @@ define <8 x double> @vfabs_vv_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_v8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -283,7 +283,7 @@ define <15 x double> @vfabs_vv_v15f64_unmasked(<15 x double> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_v15f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <15 x i1> poison, i1 true, i32 0 @@ -307,7 +307,7 @@ define <16 x double> @vfabs_vv_v16f64_unmasked(<16 x double> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_v16f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -323,7 +323,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: addi a2, a0, -16 ; CHECK-NEXT: vslidedown.vi v0, v0, 2 ; CHECK-NEXT: bltu a0, a2, .LBB26_2 @@ -354,14 +354,14 @@ ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a2, a1 ; CHECK-NEXT: .LBB27_2: -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: vfabs.v v16, v16 ; CHECK-NEXT: bltu a0, a1, .LBB27_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: .LBB27_4: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <32 x i1> poison, i1 true, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll @@ -19,7 +19,7 @@ define <2 x half> @vfadd_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_v2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -43,7 +43,7 @@ define <2 x half> @vfadd_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vf_v2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x half> poison, half %b, i32 0 @@ -81,7 +81,7 @@ define <4 x half> @vfadd_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_v4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> 
poison, i1 true, i32 0 @@ -105,7 +105,7 @@ define <4 x half> @vfadd_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vf_v4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x half> poison, half %b, i32 0 @@ -131,7 +131,7 @@ define <8 x half> @vfadd_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_v8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -155,7 +155,7 @@ define <8 x half> @vfadd_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vf_v8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x half> poison, half %b, i32 0 @@ -181,7 +181,7 @@ define <16 x half> @vfadd_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_v16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -205,7 +205,7 @@ define <16 x half> @vfadd_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vf_v16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x half> poison, half %b, i32 0 @@ -231,7 +231,7 @@ define <2 x float> @vfadd_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_v2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -267,7 +267,7 @@ define <2 x float> @vfadd_vf_v2f32_unmasked(<2 x float> %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vf_v2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x float> poison, float %b, i32 0 @@ -281,7 +281,7 @@ define <2 x float> @vfadd_vf_v2f32_unmasked_commute(<2 x float> %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vf_v2f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x float> poison, float %b, i32 0 @@ -307,7 +307,7 @@ define <4 x float> @vfadd_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_v4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -331,7 +331,7 @@ define <4 x float> @vfadd_vf_v4f32_unmasked(<4 x float> %va, float %b, 
i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vf_v4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x float> poison, float %b, i32 0 @@ -357,7 +357,7 @@ define <8 x float> @vfadd_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_v8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -381,7 +381,7 @@ define <8 x float> @vfadd_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vf_v8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x float> poison, float %b, i32 0 @@ -407,7 +407,7 @@ define <16 x float> @vfadd_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_v16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -431,7 +431,7 @@ define <16 x float> @vfadd_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vf_v16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x float> poison, float %b, i32 0 @@ -457,7 +457,7 @@ define <2 x double> @vfadd_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_v2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -481,7 +481,7 @@ define <2 x double> @vfadd_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vf_v2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x double> poison, double %b, i32 0 @@ -507,7 +507,7 @@ define <4 x double> @vfadd_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_v4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -531,7 +531,7 @@ define <4 x double> @vfadd_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vf_v4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x double> poison, double %b, i32 0 @@ -557,7 +557,7 @@ define <8 x double> @vfadd_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_v8f64_unmasked: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -581,7 +581,7 @@ define <8 x double> @vfadd_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vf_v8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x double> poison, double %b, i32 0 @@ -607,7 +607,7 @@ define <16 x double> @vfadd_vv_v16f64_unmasked(<16 x double> %va, <16 x double> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_v16f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -631,7 +631,7 @@ define <16 x double> @vfadd_vf_v16f64_unmasked(<16 x double> %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vf_v16f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x double> poison, double %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll @@ -19,7 +19,7 @@ define <2 x half> @vfdiv_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_v2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -43,7 +43,7 @@ define <2 x half> @vfdiv_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vf_v2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x half> poison, half %b, i32 0 @@ -81,7 +81,7 @@ define <4 x half> @vfdiv_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_v4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -105,7 +105,7 @@ define <4 x half> @vfdiv_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vf_v4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x half> poison, half %b, i32 0 @@ -131,7 +131,7 @@ define <8 x half> @vfdiv_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_v8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -155,7 +155,7 @@ define <8 x half> 
@vfdiv_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vf_v8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x half> poison, half %b, i32 0 @@ -181,7 +181,7 @@ define <16 x half> @vfdiv_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_v16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -205,7 +205,7 @@ define <16 x half> @vfdiv_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vf_v16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x half> poison, half %b, i32 0 @@ -231,7 +231,7 @@ define <2 x float> @vfdiv_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_v2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -255,7 +255,7 @@ define <2 x float> @vfdiv_vf_v2f32_unmasked(<2 x float> %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vf_v2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x float> poison, float %b, i32 0 @@ -281,7 +281,7 @@ define <4 x float> @vfdiv_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_v4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -305,7 +305,7 @@ define <4 x float> @vfdiv_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vf_v4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x float> poison, float %b, i32 0 @@ -331,7 +331,7 @@ define <8 x float> @vfdiv_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_v8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -355,7 +355,7 @@ define <8 x float> @vfdiv_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vf_v8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x float> poison, float %b, i32 0 @@ -381,7 +381,7 @@ define <16 x float> @vfdiv_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_v16f32_unmasked: 
; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -405,7 +405,7 @@ define <16 x float> @vfdiv_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vf_v16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x float> poison, float %b, i32 0 @@ -431,7 +431,7 @@ define <2 x double> @vfdiv_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_v2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -455,7 +455,7 @@ define <2 x double> @vfdiv_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vf_v2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x double> poison, double %b, i32 0 @@ -481,7 +481,7 @@ define <4 x double> @vfdiv_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_v4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -505,7 +505,7 @@ define <4 x double> @vfdiv_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vf_v4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x double> poison, double %b, i32 0 @@ -531,7 +531,7 @@ define <8 x double> @vfdiv_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_v8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -555,7 +555,7 @@ define <8 x double> @vfdiv_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vf_v8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x double> poison, double %b, i32 0 @@ -581,7 +581,7 @@ define <16 x double> @vfdiv_vv_v16f64_unmasked(<16 x double> %va, <16 x double> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_v16f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -605,7 +605,7 @@ define <16 x double> @vfdiv_vf_v16f64_unmasked(<16 x double> %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vf_v16f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: 
vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x double> poison, double %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll @@ -20,7 +20,7 @@ define <2 x half> @vfma_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %b, <2 x half> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -44,7 +44,7 @@ define <2 x half> @vfma_vf_v2f16_unmasked(<2 x half> %va, half %b, <2 x half> %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_v2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement <2 x half> poison, half %b, i32 0 @@ -71,7 +71,7 @@ define <4 x half> @vfma_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %b, <4 x half> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -95,7 +95,7 @@ define <4 x half> @vfma_vf_v4f16_unmasked(<4 x half> %va, half %b, <4 x half> %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_v4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement <4 x half> poison, half %b, i32 0 @@ -122,7 +122,7 @@ define <8 x half> @vfma_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %b, <8 x half> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -146,7 +146,7 @@ define <8 x half> @vfma_vf_v8f16_unmasked(<8 x half> %va, half %b, <8 x half> %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_v8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement <8 x half> poison, half %b, i32 0 @@ -173,7 +173,7 @@ define <16 x half> @vfma_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %b, <16 x half> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -197,7 +197,7 @@ define <16 x half> @vfma_vf_v16f16_unmasked(<16 x half> %va, half %b, <16 x half> %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_v16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement <16 x half> poison, half %b, i32 0 @@ -224,7 +224,7 
@@ define <2 x float> @vfma_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %b, <2 x float> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -248,7 +248,7 @@ define <2 x float> @vfma_vf_v2f32_unmasked(<2 x float> %va, float %b, <2 x float> %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_v2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement <2 x float> poison, float %b, i32 0 @@ -275,7 +275,7 @@ define <4 x float> @vfma_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %b, <4 x float> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -299,7 +299,7 @@ define <4 x float> @vfma_vf_v4f32_unmasked(<4 x float> %va, float %b, <4 x float> %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_v4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement <4 x float> poison, float %b, i32 0 @@ -326,7 +326,7 @@ define <8 x float> @vfma_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %b, <8 x float> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -350,7 +350,7 @@ define <8 x float> @vfma_vf_v8f32_unmasked(<8 x float> %va, float %b, <8 x float> %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_v8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement <8 x float> poison, float %b, i32 0 @@ -377,7 +377,7 @@ define <16 x float> @vfma_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %b, <16 x float> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -401,7 +401,7 @@ define <16 x float> @vfma_vf_v16f32_unmasked(<16 x float> %va, float %b, <16 x float> %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_v16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement <16 x float> poison, float %b, i32 0 @@ -428,7 +428,7 @@ define <2 x double> @vfma_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %b, <2 x double> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement <2 x i1> 
poison, i1 true, i32 0 @@ -452,7 +452,7 @@ define <2 x double> @vfma_vf_v2f64_unmasked(<2 x double> %va, double %b, <2 x double> %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_v2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement <2 x double> poison, double %b, i32 0 @@ -479,7 +479,7 @@ define <4 x double> @vfma_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %b, <4 x double> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -503,7 +503,7 @@ define <4 x double> @vfma_vf_v4f64_unmasked(<4 x double> %va, double %b, <4 x double> %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_v4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement <4 x double> poison, double %b, i32 0 @@ -530,7 +530,7 @@ define <8 x double> @vfma_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %b, <8 x double> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -554,7 +554,7 @@ define <8 x double> @vfma_vf_v8f64_unmasked(<8 x double> %va, double %b, <8 x double> %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_v8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement <8 x double> poison, double %b, i32 0 @@ -570,7 +570,7 @@ define <15 x double> @vfma_vv_v15f64(<15 x double> %va, <15 x double> %b, <15 x double> %c, <15 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v15f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vfmadd.vv v16, v8, v24, v0.t @@ -583,9 +583,9 @@ define <15 x double> @vfma_vv_v15f64_unmasked(<15 x double> %va, <15 x double> %b, <15 x double> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v15f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement <15 x i1> poison, i1 true, i32 0 @@ -599,7 +599,7 @@ define <16 x double> @vfma_vv_v16f64(<16 x double> %va, <16 x double> %b, <16 x double> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v16f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vfmadd.vv v16, v8, v24, v0.t @@ -612,9 +612,9 @@ define <16 x double> @vfma_vv_v16f64_unmasked(<16 x double> %va, <16 x double> %b, <16 x double> %c, i32 zeroext %evl) { ; 
CHECK-LABEL: vfma_vv_v16f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -638,7 +638,7 @@ define <16 x double> @vfma_vf_v16f64_unmasked(<16 x double> %va, double %b, <16 x double> %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_v16f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement <16 x double> poison, double %b, i32 0 @@ -661,10 +661,10 @@ ; CHECK-NEXT: mul a1, a1, a3 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v0, v0, 2 ; CHECK-NEXT: addi a1, a2, 128 -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v24, (a1) ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: li a3, 24 @@ -777,7 +777,7 @@ ; CHECK-NEXT: mul a1, a1, a3 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: addi a1, a2, 128 -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v24, (a1) ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 @@ -801,7 +801,7 @@ ; CHECK-NEXT: addi a2, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill ; CHECK-NEXT: vle64.v v0, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 @@ -813,7 +813,7 @@ ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: li a4, 16 ; CHECK-NEXT: .LBB51_4: -; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add a0, sp, a0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmacc-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmacc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmacc-vp.ll @@ -26,7 +26,7 @@ define <2 x half> @vfmacc_vv_v2f16_unmasked(<2 x half> %a, <2 x half> %b, <2 x half> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_v2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfmacc.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -72,7 +72,7 @@ define <2 x half> @vfmacc_vf_v2f16_unmasked(<2 x half> %va, half %b, <2 x half> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vf_v2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfmacc.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -153,7 +153,7 @@ define <4 x half> @vfmacc_vv_v4f16_unmasked(<4 x half> %a, <4 x half> %b, <4 x half> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_v4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfmacc.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -199,7 
+199,7 @@ define <4 x half> @vfmacc_vf_v4f16_unmasked(<4 x half> %va, half %b, <4 x half> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vf_v4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfmacc.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -280,7 +280,7 @@ define <8 x half> @vfmacc_vv_v8f16_unmasked(<8 x half> %a, <8 x half> %b, <8 x half> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_v8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfmacc.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -326,7 +326,7 @@ define <8 x half> @vfmacc_vf_v8f16_unmasked(<8 x half> %va, half %b, <8 x half> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vf_v8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfmacc.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -407,7 +407,7 @@ define <16 x half> @vfmacc_vv_v16f16_unmasked(<16 x half> %a, <16 x half> %b, <16 x half> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_v16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfmacc.vv v12, v8, v10 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -453,7 +453,7 @@ define <16 x half> @vfmacc_vf_v16f16_unmasked(<16 x half> %va, half %b, <16 x half> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vf_v16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfmacc.vf v10, fa0, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -534,7 +534,7 @@ define <32 x half> @vfmacc_vv_v32f16_unmasked(<32 x half> %a, <32 x half> %b, <32 x half> %c, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_v32f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfmacc.vv v16, v8, v12 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -580,7 +580,7 @@ define <32 x half> @vfmacc_vf_v32f16_unmasked(<32 x half> %va, half %b, <32 x half> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vf_v32f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfmacc.vf v12, fa0, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -661,7 +661,7 @@ define <2 x float> @vfmacc_vv_v2f32_unmasked(<2 x float> %a, <2 x float> %b, <2 x float> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_v2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfmacc.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -707,7 +707,7 @@ define <2 x float> @vfmacc_vf_v2f32_unmasked(<2 x float> %va, float %b, <2 x float> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vf_v2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfmacc.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -788,7 +788,7 @@ define <4 x float> @vfmacc_vv_v4f32_unmasked(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vfmacc_vv_v4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfmacc.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -834,7 +834,7 @@ define <4 x float> @vfmacc_vf_v4f32_unmasked(<4 x float> %va, float %b, <4 x float> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vf_v4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfmacc.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -915,7 +915,7 @@ define <8 x float> @vfmacc_vv_v8f32_unmasked(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_v8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfmacc.vv v12, v8, v10 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -961,7 +961,7 @@ define <8 x float> @vfmacc_vf_v8f32_unmasked(<8 x float> %va, float %b, <8 x float> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vf_v8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfmacc.vf v10, fa0, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1042,7 +1042,7 @@ define <16 x float> @vfmacc_vv_v16f32_unmasked(<16 x float> %a, <16 x float> %b, <16 x float> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_v16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfmacc.vv v16, v8, v12 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -1088,7 +1088,7 @@ define <16 x float> @vfmacc_vf_v16f32_unmasked(<16 x float> %va, float %b, <16 x float> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vf_v16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfmacc.vf v12, fa0, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1169,7 +1169,7 @@ define <2 x double> @vfmacc_vv_v2f64_unmasked(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_v2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfmacc.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1215,7 +1215,7 @@ define <2 x double> @vfmacc_vf_v2f64_unmasked(<2 x double> %va, double %b, <2 x double> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vf_v2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfmacc.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1296,7 +1296,7 @@ define <4 x double> @vfmacc_vv_v4f64_unmasked(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_v4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfmacc.vv v12, v8, v10 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -1342,7 +1342,7 @@ define <4 x double> @vfmacc_vf_v4f64_unmasked(<4 x double> %va, double %b, <4 x double> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vf_v4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli 
zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfmacc.vf v10, fa0, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1423,7 +1423,7 @@ define <8 x double> @vfmacc_vv_v8f64_unmasked(<8 x double> %a, <8 x double> %b, <8 x double> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_v8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfmacc.vv v16, v8, v12 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -1469,7 +1469,7 @@ define <8 x double> @vfmacc_vf_v8f64_unmasked(<8 x double> %va, double %b, <8 x double> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vf_v8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfmacc.vf v12, fa0, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll @@ -19,7 +19,7 @@ define <2 x half> @vfmax_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -43,7 +43,7 @@ define <4 x half> @vfmax_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -67,7 +67,7 @@ define <8 x half> @vfmax_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -91,7 +91,7 @@ define <16 x half> @vfmax_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -115,7 +115,7 @@ define <2 x float> @vfmax_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -139,7 +139,7 @@ define <4 x float> @vfmax_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -163,7 +163,7 @@ define <8 x float> @vfmax_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v8f32_unmasked: 
; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -187,7 +187,7 @@ define <16 x float> @vfmax_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -211,7 +211,7 @@ define <2 x double> @vfmax_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -235,7 +235,7 @@ define <4 x double> @vfmax_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -259,7 +259,7 @@ define <8 x double> @vfmax_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -283,7 +283,7 @@ define <15 x double> @vfmax_vv_v15f64_unmasked(<15 x double> %va, <15 x double> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v15f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement <15 x i1> poison, i1 true, i32 0 @@ -307,7 +307,7 @@ define <16 x double> @vfmax_vv_v16f64_unmasked(<16 x double> %va, <16 x double> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_v16f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -328,10 +328,10 @@ ; CHECK-NEXT: mul a1, a1, a3 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v0, v0, 2 ; CHECK-NEXT: addi a1, a0, 128 -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v24, (a1) ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 @@ -388,7 +388,7 @@ ; CHECK-LABEL: vfmax_vv_v32f64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: addi a1, a0, 128 -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v24, (a1) ; CHECK-NEXT: addi a3, a2, -16 ; CHECK-NEXT: li a1, 0 @@ -397,14 +397,14 @@ ; CHECK-NEXT: mv a1, a3 ; CHECK-NEXT: .LBB27_2: ; CHECK-NEXT: vle64.v v0, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vfmax.vv v16, 
v16, v24 ; CHECK-NEXT: bltu a2, a0, .LBB27_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: li a2, 16 ; CHECK-NEXT: .LBB27_4: -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v0 ; CHECK-NEXT: ret %head = insertelement <32 x i1> poison, i1 true, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax.ll @@ -9,7 +9,7 @@ define <2 x half> @vfmax_v2f16_vv(<2 x half> %a, <2 x half> %b) { ; CHECK-LABEL: vfmax_v2f16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <2 x half> @llvm.maxnum.v2f16(<2 x half> %a, <2 x half> %b) @@ -19,7 +19,7 @@ define <2 x half> @vfmax_v2f16_vf(<2 x half> %a, half %b) { ; CHECK-LABEL: vfmax_v2f16_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <2 x half> poison, half %b, i32 0 @@ -31,7 +31,7 @@ define <2 x half> @vfmax_v2f16_fv(<2 x half> %a, half %b) { ; CHECK-LABEL: vfmax_v2f16_fv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <2 x half> poison, half %b, i32 0 @@ -45,7 +45,7 @@ define <4 x half> @vfmax_v4f16_vv(<4 x half> %a, <4 x half> %b) { ; CHECK-LABEL: vfmax_v4f16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <4 x half> @llvm.maxnum.v4f16(<4 x half> %a, <4 x half> %b) @@ -55,7 +55,7 @@ define <4 x half> @vfmax_v4f16_vf(<4 x half> %a, half %b) { ; CHECK-LABEL: vfmax_v4f16_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <4 x half> poison, half %b, i32 0 @@ -67,7 +67,7 @@ define <4 x half> @vfmax_v4f16_fv(<4 x half> %a, half %b) { ; CHECK-LABEL: vfmax_v4f16_fv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <4 x half> poison, half %b, i32 0 @@ -81,7 +81,7 @@ define <8 x half> @vfmax_v8f16_vv(<8 x half> %a, <8 x half> %b) { ; CHECK-LABEL: vfmax_v8f16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <8 x half> @llvm.maxnum.v8f16(<8 x half> %a, <8 x half> %b) @@ -91,7 +91,7 @@ define <8 x half> @vfmax_v8f16_vf(<8 x half> %a, half %b) { ; CHECK-LABEL: vfmax_v8f16_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <8 x half> poison, half %b, i32 0 @@ -103,7 +103,7 @@ define <8 x half> @vfmax_v8f16_fv(<8 x half> %a, half %b) { ; CHECK-LABEL: vfmax_v8f16_fv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; 
CHECK-NEXT: ret %head = insertelement <8 x half> poison, half %b, i32 0 @@ -117,7 +117,7 @@ define <16 x half> @vfmax_v16f16_vv(<16 x half> %a, <16 x half> %b) { ; CHECK-LABEL: vfmax_v16f16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call <16 x half> @llvm.maxnum.v16f16(<16 x half> %a, <16 x half> %b) @@ -127,7 +127,7 @@ define <16 x half> @vfmax_v16f16_vf(<16 x half> %a, half %b) { ; CHECK-LABEL: vfmax_v16f16_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <16 x half> poison, half %b, i32 0 @@ -139,7 +139,7 @@ define <16 x half> @vfmax_v16f16_fv(<16 x half> %a, half %b) { ; CHECK-LABEL: vfmax_v16f16_fv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <16 x half> poison, half %b, i32 0 @@ -153,7 +153,7 @@ define <2 x float> @vfmax_v2f32_vv(<2 x float> %a, <2 x float> %b) { ; CHECK-LABEL: vfmax_v2f32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <2 x float> @llvm.maxnum.v2f32(<2 x float> %a, <2 x float> %b) @@ -163,7 +163,7 @@ define <2 x float> @vfmax_v2f32_vf(<2 x float> %a, float %b) { ; CHECK-LABEL: vfmax_v2f32_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <2 x float> poison, float %b, i32 0 @@ -175,7 +175,7 @@ define <2 x float> @vfmax_v2f32_fv(<2 x float> %a, float %b) { ; CHECK-LABEL: vfmax_v2f32_fv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <2 x float> poison, float %b, i32 0 @@ -189,7 +189,7 @@ define <4 x float> @vfmax_v4f32_vv(<4 x float> %a, <4 x float> %b) { ; CHECK-LABEL: vfmax_v4f32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <4 x float> @llvm.maxnum.v4f32(<4 x float> %a, <4 x float> %b) @@ -199,7 +199,7 @@ define <4 x float> @vfmax_v4f32_vf(<4 x float> %a, float %b) { ; CHECK-LABEL: vfmax_v4f32_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <4 x float> poison, float %b, i32 0 @@ -211,7 +211,7 @@ define <4 x float> @vfmax_v4f32_fv(<4 x float> %a, float %b) { ; CHECK-LABEL: vfmax_v4f32_fv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <4 x float> poison, float %b, i32 0 @@ -225,7 +225,7 @@ define <8 x float> @vfmax_v8f32_vv(<8 x float> %a, <8 x float> %b) { ; CHECK-LABEL: vfmax_v8f32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call <8 x float> @llvm.maxnum.v8f32(<8 x 
float> %a, <8 x float> %b) @@ -235,7 +235,7 @@ define <8 x float> @vfmax_v8f32_vf(<8 x float> %a, float %b) { ; CHECK-LABEL: vfmax_v8f32_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <8 x float> poison, float %b, i32 0 @@ -247,7 +247,7 @@ define <8 x float> @vfmax_v8f32_fv(<8 x float> %a, float %b) { ; CHECK-LABEL: vfmax_v8f32_fv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <8 x float> poison, float %b, i32 0 @@ -261,7 +261,7 @@ define <16 x float> @vfmax_v16f32_vv(<16 x float> %a, <16 x float> %b) { ; CHECK-LABEL: vfmax_v16f32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v12 ; CHECK-NEXT: ret %v = call <16 x float> @llvm.maxnum.v16f32(<16 x float> %a, <16 x float> %b) @@ -271,7 +271,7 @@ define <16 x float> @vfmax_v16f32_vf(<16 x float> %a, float %b) { ; CHECK-LABEL: vfmax_v16f32_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <16 x float> poison, float %b, i32 0 @@ -283,7 +283,7 @@ define <16 x float> @vfmax_v16f32_fv(<16 x float> %a, float %b) { ; CHECK-LABEL: vfmax_v16f32_fv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <16 x float> poison, float %b, i32 0 @@ -297,7 +297,7 @@ define <2 x double> @vfmax_v2f64_vv(<2 x double> %a, <2 x double> %b) { ; CHECK-LABEL: vfmax_v2f64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <2 x double> @llvm.maxnum.v2f64(<2 x double> %a, <2 x double> %b) @@ -307,7 +307,7 @@ define <2 x double> @vfmax_v2f64_vf(<2 x double> %a, double %b) { ; CHECK-LABEL: vfmax_v2f64_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <2 x double> poison, double %b, i32 0 @@ -319,7 +319,7 @@ define <2 x double> @vfmax_v2f64_fv(<2 x double> %a, double %b) { ; CHECK-LABEL: vfmax_v2f64_fv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <2 x double> poison, double %b, i32 0 @@ -333,7 +333,7 @@ define <4 x double> @vfmax_v4f64_vv(<4 x double> %a, <4 x double> %b) { ; CHECK-LABEL: vfmax_v4f64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call <4 x double> @llvm.maxnum.v4f64(<4 x double> %a, <4 x double> %b) @@ -343,7 +343,7 @@ define <4 x double> @vfmax_v4f64_vf(<4 x double> %a, double %b) { ; CHECK-LABEL: vfmax_v4f64_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <4 x double> poison, double %b, i32 0 @@ 
-355,7 +355,7 @@ define <4 x double> @vfmax_v4f64_fv(<4 x double> %a, double %b) { ; CHECK-LABEL: vfmax_v4f64_fv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <4 x double> poison, double %b, i32 0 @@ -369,7 +369,7 @@ define <8 x double> @vfmax_v8f64_vv(<8 x double> %a, <8 x double> %b) { ; CHECK-LABEL: vfmax_v8f64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v12 ; CHECK-NEXT: ret %v = call <8 x double> @llvm.maxnum.v8f64(<8 x double> %a, <8 x double> %b) @@ -379,7 +379,7 @@ define <8 x double> @vfmax_v8f64_vf(<8 x double> %a, double %b) { ; CHECK-LABEL: vfmax_v8f64_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <8 x double> poison, double %b, i32 0 @@ -391,7 +391,7 @@ define <8 x double> @vfmax_v8f64_fv(<8 x double> %a, double %b) { ; CHECK-LABEL: vfmax_v8f64_fv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <8 x double> poison, double %b, i32 0 @@ -405,7 +405,7 @@ define <16 x double> @vfmax_v16f64_vv(<16 x double> %a, <16 x double> %b) { ; CHECK-LABEL: vfmax_v16f64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v16 ; CHECK-NEXT: ret %v = call <16 x double> @llvm.maxnum.v16f64(<16 x double> %a, <16 x double> %b) @@ -415,7 +415,7 @@ define <16 x double> @vfmax_v16f64_vf(<16 x double> %a, double %b) { ; CHECK-LABEL: vfmax_v16f64_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <16 x double> poison, double %b, i32 0 @@ -427,7 +427,7 @@ define <16 x double> @vfmax_v16f64_fv(<16 x double> %a, double %b) { ; CHECK-LABEL: vfmax_v16f64_fv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <16 x double> poison, double %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll @@ -19,7 +19,7 @@ define <2 x half> @vfmin_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -43,7 +43,7 @@ define <4 x half> @vfmin_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -67,7 +67,7 @@ define <8 x half> @vfmin_vv_v8f16_unmasked(<8 x 
half> %va, <8 x half> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -91,7 +91,7 @@ define <16 x half> @vfmin_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -115,7 +115,7 @@ define <2 x float> @vfmin_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -139,7 +139,7 @@ define <4 x float> @vfmin_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -163,7 +163,7 @@ define <8 x float> @vfmin_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -187,7 +187,7 @@ define <16 x float> @vfmin_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -211,7 +211,7 @@ define <2 x double> @vfmin_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -235,7 +235,7 @@ define <4 x double> @vfmin_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -259,7 +259,7 @@ define <8 x double> @vfmin_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -283,7 +283,7 @@ define <15 x double> @vfmin_vv_v15f64_unmasked(<15 x double> %va, <15 x double> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v15f64_unmasked: ; CHECK: # 
%bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement <15 x i1> poison, i1 true, i32 0 @@ -307,7 +307,7 @@ define <16 x double> @vfmin_vv_v16f64_unmasked(<16 x double> %va, <16 x double> %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_v16f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -328,10 +328,10 @@ ; CHECK-NEXT: mul a1, a1, a3 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v0, v0, 2 ; CHECK-NEXT: addi a1, a0, 128 -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v24, (a1) ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 @@ -388,7 +388,7 @@ ; CHECK-LABEL: vfmin_vv_v32f64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: addi a1, a0, 128 -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v24, (a1) ; CHECK-NEXT: addi a3, a2, -16 ; CHECK-NEXT: li a1, 0 @@ -397,14 +397,14 @@ ; CHECK-NEXT: mv a1, a3 ; CHECK-NEXT: .LBB27_2: ; CHECK-NEXT: vle64.v v0, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: vfmin.vv v16, v16, v24 ; CHECK-NEXT: bltu a2, a0, .LBB27_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: li a2, 16 ; CHECK-NEXT: .LBB27_4: -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v0 ; CHECK-NEXT: ret %head = insertelement <32 x i1> poison, i1 true, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin.ll @@ -9,7 +9,7 @@ define <2 x half> @vfmin_v2f16_vv(<2 x half> %a, <2 x half> %b) { ; CHECK-LABEL: vfmin_v2f16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <2 x half> @llvm.minnum.v2f16(<2 x half> %a, <2 x half> %b) @@ -19,7 +19,7 @@ define <2 x half> @vfmin_v2f16_vf(<2 x half> %a, half %b) { ; CHECK-LABEL: vfmin_v2f16_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <2 x half> poison, half %b, i32 0 @@ -31,7 +31,7 @@ define <2 x half> @vfmin_v2f16_fv(<2 x half> %a, half %b) { ; CHECK-LABEL: vfmin_v2f16_fv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <2 x half> poison, half %b, i32 0 @@ -45,7 +45,7 @@ define <4 x half> @vfmin_v4f16_vv(<4 x half> %a, <4 x half> %b) { ; CHECK-LABEL: vfmin_v4f16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <4 x half> @llvm.minnum.v4f16(<4 x half> %a, <4 x half> %b) @@ 
-55,7 +55,7 @@ define <4 x half> @vfmin_v4f16_vf(<4 x half> %a, half %b) { ; CHECK-LABEL: vfmin_v4f16_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <4 x half> poison, half %b, i32 0 @@ -67,7 +67,7 @@ define <4 x half> @vfmin_v4f16_fv(<4 x half> %a, half %b) { ; CHECK-LABEL: vfmin_v4f16_fv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <4 x half> poison, half %b, i32 0 @@ -81,7 +81,7 @@ define <8 x half> @vfmin_v8f16_vv(<8 x half> %a, <8 x half> %b) { ; CHECK-LABEL: vfmin_v8f16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <8 x half> @llvm.minnum.v8f16(<8 x half> %a, <8 x half> %b) @@ -91,7 +91,7 @@ define <8 x half> @vfmin_v8f16_vf(<8 x half> %a, half %b) { ; CHECK-LABEL: vfmin_v8f16_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <8 x half> poison, half %b, i32 0 @@ -103,7 +103,7 @@ define <8 x half> @vfmin_v8f16_fv(<8 x half> %a, half %b) { ; CHECK-LABEL: vfmin_v8f16_fv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <8 x half> poison, half %b, i32 0 @@ -117,7 +117,7 @@ define <16 x half> @vfmin_v16f16_vv(<16 x half> %a, <16 x half> %b) { ; CHECK-LABEL: vfmin_v16f16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call <16 x half> @llvm.minnum.v16f16(<16 x half> %a, <16 x half> %b) @@ -127,7 +127,7 @@ define <16 x half> @vfmin_v16f16_vf(<16 x half> %a, half %b) { ; CHECK-LABEL: vfmin_v16f16_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <16 x half> poison, half %b, i32 0 @@ -139,7 +139,7 @@ define <16 x half> @vfmin_v16f16_fv(<16 x half> %a, half %b) { ; CHECK-LABEL: vfmin_v16f16_fv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <16 x half> poison, half %b, i32 0 @@ -153,7 +153,7 @@ define <2 x float> @vfmin_v2f32_vv(<2 x float> %a, <2 x float> %b) { ; CHECK-LABEL: vfmin_v2f32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <2 x float> @llvm.minnum.v2f32(<2 x float> %a, <2 x float> %b) @@ -163,7 +163,7 @@ define <2 x float> @vfmin_v2f32_vf(<2 x float> %a, float %b) { ; CHECK-LABEL: vfmin_v2f32_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <2 x float> poison, float %b, i32 0 @@ -175,7 +175,7 @@ define <2 x float> @vfmin_v2f32_fv(<2 x float> %a, float %b) { ; CHECK-LABEL: 
vfmin_v2f32_fv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <2 x float> poison, float %b, i32 0 @@ -189,7 +189,7 @@ define <4 x float> @vfmin_v4f32_vv(<4 x float> %a, <4 x float> %b) { ; CHECK-LABEL: vfmin_v4f32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <4 x float> @llvm.minnum.v4f32(<4 x float> %a, <4 x float> %b) @@ -199,7 +199,7 @@ define <4 x float> @vfmin_v4f32_vf(<4 x float> %a, float %b) { ; CHECK-LABEL: vfmin_v4f32_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <4 x float> poison, float %b, i32 0 @@ -211,7 +211,7 @@ define <4 x float> @vfmin_v4f32_fv(<4 x float> %a, float %b) { ; CHECK-LABEL: vfmin_v4f32_fv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <4 x float> poison, float %b, i32 0 @@ -225,7 +225,7 @@ define <8 x float> @vfmin_v8f32_vv(<8 x float> %a, <8 x float> %b) { ; CHECK-LABEL: vfmin_v8f32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call <8 x float> @llvm.minnum.v8f32(<8 x float> %a, <8 x float> %b) @@ -235,7 +235,7 @@ define <8 x float> @vfmin_v8f32_vf(<8 x float> %a, float %b) { ; CHECK-LABEL: vfmin_v8f32_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <8 x float> poison, float %b, i32 0 @@ -247,7 +247,7 @@ define <8 x float> @vfmin_v8f32_fv(<8 x float> %a, float %b) { ; CHECK-LABEL: vfmin_v8f32_fv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <8 x float> poison, float %b, i32 0 @@ -261,7 +261,7 @@ define <16 x float> @vfmin_v16f32_vv(<16 x float> %a, <16 x float> %b) { ; CHECK-LABEL: vfmin_v16f32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v12 ; CHECK-NEXT: ret %v = call <16 x float> @llvm.minnum.v16f32(<16 x float> %a, <16 x float> %b) @@ -271,7 +271,7 @@ define <16 x float> @vfmin_v16f32_vf(<16 x float> %a, float %b) { ; CHECK-LABEL: vfmin_v16f32_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <16 x float> poison, float %b, i32 0 @@ -283,7 +283,7 @@ define <16 x float> @vfmin_v16f32_fv(<16 x float> %a, float %b) { ; CHECK-LABEL: vfmin_v16f32_fv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <16 x float> poison, float %b, i32 0 @@ -297,7 +297,7 @@ define <2 x double> @vfmin_v2f64_vv(<2 x double> %a, <2 x double> %b) { ; CHECK-LABEL: vfmin_v2f64_vv: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <2 x double> @llvm.minnum.v2f64(<2 x double> %a, <2 x double> %b) @@ -307,7 +307,7 @@ define <2 x double> @vfmin_v2f64_vf(<2 x double> %a, double %b) { ; CHECK-LABEL: vfmin_v2f64_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <2 x double> poison, double %b, i32 0 @@ -319,7 +319,7 @@ define <2 x double> @vfmin_v2f64_fv(<2 x double> %a, double %b) { ; CHECK-LABEL: vfmin_v2f64_fv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <2 x double> poison, double %b, i32 0 @@ -333,7 +333,7 @@ define <4 x double> @vfmin_v4f64_vv(<4 x double> %a, <4 x double> %b) { ; CHECK-LABEL: vfmin_v4f64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call <4 x double> @llvm.minnum.v4f64(<4 x double> %a, <4 x double> %b) @@ -343,7 +343,7 @@ define <4 x double> @vfmin_v4f64_vf(<4 x double> %a, double %b) { ; CHECK-LABEL: vfmin_v4f64_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <4 x double> poison, double %b, i32 0 @@ -355,7 +355,7 @@ define <4 x double> @vfmin_v4f64_fv(<4 x double> %a, double %b) { ; CHECK-LABEL: vfmin_v4f64_fv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <4 x double> poison, double %b, i32 0 @@ -369,7 +369,7 @@ define <8 x double> @vfmin_v8f64_vv(<8 x double> %a, <8 x double> %b) { ; CHECK-LABEL: vfmin_v8f64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v12 ; CHECK-NEXT: ret %v = call <8 x double> @llvm.minnum.v8f64(<8 x double> %a, <8 x double> %b) @@ -379,7 +379,7 @@ define <8 x double> @vfmin_v8f64_vf(<8 x double> %a, double %b) { ; CHECK-LABEL: vfmin_v8f64_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <8 x double> poison, double %b, i32 0 @@ -391,7 +391,7 @@ define <8 x double> @vfmin_v8f64_fv(<8 x double> %a, double %b) { ; CHECK-LABEL: vfmin_v8f64_fv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <8 x double> poison, double %b, i32 0 @@ -405,7 +405,7 @@ define <16 x double> @vfmin_v16f64_vv(<16 x double> %a, <16 x double> %b) { ; CHECK-LABEL: vfmin_v16f64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v16 ; CHECK-NEXT: ret %v = call <16 x double> @llvm.minnum.v16f64(<16 x double> %a, <16 x double> %b) @@ -415,7 +415,7 @@ define <16 x double> @vfmin_v16f64_vf(<16 x double> %a, double %b) { ; CHECK-LABEL: vfmin_v16f64_vf: ; CHECK: # %bb.0: 
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <16 x double> poison, double %b, i32 0 @@ -427,7 +427,7 @@ define <16 x double> @vfmin_v16f64_fv(<16 x double> %a, double %b) { ; CHECK-LABEL: vfmin_v16f64_fv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement <16 x double> poison, double %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmsac-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmsac-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmsac-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmsac-vp.ll @@ -27,7 +27,7 @@ define <2 x half> @vfmsac_vv_v2f16_unmasked(<2 x half> %a, <2 x half> %b, <2 x half> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vv_v2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfmsac.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -76,7 +76,7 @@ define <2 x half> @vfmsac_vf_v2f16_unmasked(<2 x half> %a, half %b, <2 x half> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vf_v2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfmsac.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -162,7 +162,7 @@ define <4 x half> @vfmsac_vv_v4f16_unmasked(<4 x half> %a, <4 x half> %b, <4 x half> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vv_v4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfmsac.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -211,7 +211,7 @@ define <4 x half> @vfmsac_vf_v4f16_unmasked(<4 x half> %a, half %b, <4 x half> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vf_v4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfmsac.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -297,7 +297,7 @@ define <8 x half> @vfmsac_vv_v8f16_unmasked(<8 x half> %a, <8 x half> %b, <8 x half> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vv_v8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfmsac.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -346,7 +346,7 @@ define <8 x half> @vfmsac_vf_v8f16_unmasked(<8 x half> %a, half %b, <8 x half> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vf_v8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfmsac.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -432,7 +432,7 @@ define <16 x half> @vfmsac_vv_v16f16_unmasked(<16 x half> %a, <16 x half> %b, <16 x half> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vv_v16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfmsac.vv v12, v8, v10 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -481,7 +481,7 @@ define <16 x half> @vfmsac_vf_v16f16_unmasked(<16 x half> %a, half %b, <16 x half> %c, 
i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vf_v16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfmsac.vf v10, fa0, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -567,7 +567,7 @@ define <32 x half> @vfmsac_vv_v32f16_unmasked(<32 x half> %a, <32 x half> %b, <32 x half> %c, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vv_v32f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfmsac.vv v16, v8, v12 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -616,7 +616,7 @@ define <32 x half> @vfmsac_vf_v32f16_unmasked(<32 x half> %a, half %b, <32 x half> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vf_v32f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfmsac.vf v12, fa0, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -702,7 +702,7 @@ define <2 x float> @vfmsac_vv_v2f32_unmasked(<2 x float> %a, <2 x float> %b, <2 x float> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vv_v2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfmsac.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -751,7 +751,7 @@ define <2 x float> @vfmsac_vf_v2f32_unmasked(<2 x float> %a, float %b, <2 x float> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vf_v2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfmsac.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -837,7 +837,7 @@ define <4 x float> @vfmsac_vv_v4f32_unmasked(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vv_v4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfmsac.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -886,7 +886,7 @@ define <4 x float> @vfmsac_vf_v4f32_unmasked(<4 x float> %a, float %b, <4 x float> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vf_v4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfmsac.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -972,7 +972,7 @@ define <8 x float> @vfmsac_vv_v8f32_unmasked(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vv_v8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfmsac.vv v12, v8, v10 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -1021,7 +1021,7 @@ define <8 x float> @vfmsac_vf_v8f32_unmasked(<8 x float> %a, float %b, <8 x float> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vf_v8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfmsac.vf v10, fa0, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1107,7 +1107,7 @@ define <16 x float> @vfmsac_vv_v16f32_unmasked(<16 x float> %a, <16 x float> %b, <16 x float> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vv_v16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, 
mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfmsac.vv v16, v8, v12 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -1156,7 +1156,7 @@ define <16 x float> @vfmsac_vf_v16f32_unmasked(<16 x float> %a, float %b, <16 x float> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vf_v16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfmsac.vf v12, fa0, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1242,7 +1242,7 @@ define <2 x double> @vfmsac_vv_v2f64_unmasked(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vv_v2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfmsac.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1291,7 +1291,7 @@ define <2 x double> @vfmsac_vf_v2f64_unmasked(<2 x double> %a, double %b, <2 x double> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vf_v2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfmsac.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1377,7 +1377,7 @@ define <4 x double> @vfmsac_vv_v4f64_unmasked(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vv_v4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfmsac.vv v12, v8, v10 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -1426,7 +1426,7 @@ define <4 x double> @vfmsac_vf_v4f64_unmasked(<4 x double> %a, double %b, <4 x double> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vf_v4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfmsac.vf v10, fa0, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1512,7 +1512,7 @@ define <8 x double> @vfmsac_vv_v8f64_unmasked(<8 x double> %a, <8 x double> %b, <8 x double> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vv_v8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfmsac.vv v16, v8, v12 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -1561,7 +1561,7 @@ define <8 x double> @vfmsac_vf_v8f64_unmasked(<8 x double> %a, double %b, <8 x double> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmsac_vf_v8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfmsac.vf v12, fa0, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll @@ -19,7 +19,7 @@ define <2 x half> @vfmul_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_v2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -43,7 +43,7 @@ define <2 x half> @vfmul_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext %evl) { ; 
CHECK-LABEL: vfmul_vf_v2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x half> poison, half %b, i32 0 @@ -81,7 +81,7 @@ define <4 x half> @vfmul_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_v4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -105,7 +105,7 @@ define <4 x half> @vfmul_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vf_v4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x half> poison, half %b, i32 0 @@ -131,7 +131,7 @@ define <8 x half> @vfmul_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_v8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -155,7 +155,7 @@ define <8 x half> @vfmul_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vf_v8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x half> poison, half %b, i32 0 @@ -181,7 +181,7 @@ define <16 x half> @vfmul_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_v16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -205,7 +205,7 @@ define <16 x half> @vfmul_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vf_v16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x half> poison, half %b, i32 0 @@ -231,7 +231,7 @@ define <2 x float> @vfmul_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_v2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -255,7 +255,7 @@ define <2 x float> @vfmul_vf_v2f32_unmasked(<2 x float> %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vf_v2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x float> poison, float %b, i32 0 @@ -281,7 +281,7 @@ define <4 x float> @vfmul_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_v4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli 
zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -305,7 +305,7 @@ define <4 x float> @vfmul_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vf_v4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x float> poison, float %b, i32 0 @@ -331,7 +331,7 @@ define <8 x float> @vfmul_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_v8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -355,7 +355,7 @@ define <8 x float> @vfmul_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vf_v8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x float> poison, float %b, i32 0 @@ -381,7 +381,7 @@ define <16 x float> @vfmul_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_v16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -405,7 +405,7 @@ define <16 x float> @vfmul_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vf_v16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x float> poison, float %b, i32 0 @@ -431,7 +431,7 @@ define <2 x double> @vfmul_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_v2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -455,7 +455,7 @@ define <2 x double> @vfmul_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vf_v2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x double> poison, double %b, i32 0 @@ -481,7 +481,7 @@ define <4 x double> @vfmul_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_v4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -505,7 +505,7 @@ define <4 x double> @vfmul_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vf_v4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x 
double> poison, double %b, i32 0 @@ -531,7 +531,7 @@ define <8 x double> @vfmul_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_v8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -555,7 +555,7 @@ define <8 x double> @vfmul_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vf_v8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x double> poison, double %b, i32 0 @@ -581,7 +581,7 @@ define <16 x double> @vfmul_vv_v16f64_unmasked(<16 x double> %va, <16 x double> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_v16f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -605,7 +605,7 @@ define <16 x double> @vfmul_vf_v16f64_unmasked(<16 x double> %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vf_v16f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x double> poison, double %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll @@ -20,7 +20,7 @@ define <2 x half> @vfma_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %b, <2 x half> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -44,7 +44,7 @@ define <2 x half> @vfma_vf_v2f16_unmasked(<2 x half> %va, half %b, <2 x half> %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_v2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement <2 x half> poison, half %b, i32 0 @@ -71,7 +71,7 @@ define <4 x half> @vfma_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %b, <4 x half> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -95,7 +95,7 @@ define <4 x half> @vfma_vf_v4f16_unmasked(<4 x half> %va, half %b, <4 x half> %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_v4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement <4 x half> poison, half %b, i32 0 @@ -122,7 +122,7 @@ define <8 x half> @vfma_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %b, <8 x half> %c, i32 zeroext %evl) { ; CHECK-LABEL: 
vfma_vv_v8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -146,7 +146,7 @@ define <8 x half> @vfma_vf_v8f16_unmasked(<8 x half> %va, half %b, <8 x half> %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_v8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement <8 x half> poison, half %b, i32 0 @@ -173,7 +173,7 @@ define <16 x half> @vfma_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %b, <16 x half> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -197,7 +197,7 @@ define <16 x half> @vfma_vf_v16f16_unmasked(<16 x half> %va, half %b, <16 x half> %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_v16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement <16 x half> poison, half %b, i32 0 @@ -224,7 +224,7 @@ define <2 x float> @vfma_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %b, <2 x float> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -248,7 +248,7 @@ define <2 x float> @vfma_vf_v2f32_unmasked(<2 x float> %va, float %b, <2 x float> %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_v2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement <2 x float> poison, float %b, i32 0 @@ -275,7 +275,7 @@ define <4 x float> @vfma_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %b, <4 x float> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -299,7 +299,7 @@ define <4 x float> @vfma_vf_v4f32_unmasked(<4 x float> %va, float %b, <4 x float> %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_v4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement <4 x float> poison, float %b, i32 0 @@ -326,7 +326,7 @@ define <8 x float> @vfma_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %b, <8 x float> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -350,7 +350,7 @@ define <8 x float> @vfma_vf_v8f32_unmasked(<8 x float> %va, float %b, <8 x float> %vc, i32 zeroext %evl) { ; 
CHECK-LABEL: vfma_vf_v8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement <8 x float> poison, float %b, i32 0 @@ -377,7 +377,7 @@ define <16 x float> @vfma_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %b, <16 x float> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -401,7 +401,7 @@ define <16 x float> @vfma_vf_v16f32_unmasked(<16 x float> %va, float %b, <16 x float> %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_v16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement <16 x float> poison, float %b, i32 0 @@ -428,7 +428,7 @@ define <2 x double> @vfma_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %b, <2 x double> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -452,7 +452,7 @@ define <2 x double> @vfma_vf_v2f64_unmasked(<2 x double> %va, double %b, <2 x double> %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_v2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement <2 x double> poison, double %b, i32 0 @@ -479,7 +479,7 @@ define <4 x double> @vfma_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %b, <4 x double> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -503,7 +503,7 @@ define <4 x double> @vfma_vf_v4f64_unmasked(<4 x double> %va, double %b, <4 x double> %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_v4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement <4 x double> poison, double %b, i32 0 @@ -530,7 +530,7 @@ define <8 x double> @vfma_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %b, <8 x double> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -554,7 +554,7 @@ define <8 x double> @vfma_vf_v8f64_unmasked(<8 x double> %va, double %b, <8 x double> %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_v8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement <8 x double> poison, double %b, i32 0 @@ -570,7 +570,7 @@ define <15 x double> @vfma_vv_v15f64(<15 x 
double> %va, <15 x double> %b, <15 x double> %c, <15 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v15f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vfmadd.vv v16, v8, v24, v0.t @@ -583,9 +583,9 @@ define <15 x double> @vfma_vv_v15f64_unmasked(<15 x double> %va, <15 x double> %b, <15 x double> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v15f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement <15 x i1> poison, i1 true, i32 0 @@ -599,7 +599,7 @@ define <16 x double> @vfma_vv_v16f64(<16 x double> %va, <16 x double> %b, <16 x double> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v16f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vfmadd.vv v16, v8, v24, v0.t @@ -612,9 +612,9 @@ define <16 x double> @vfma_vv_v16f64_unmasked(<16 x double> %va, <16 x double> %b, <16 x double> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_v16f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -638,7 +638,7 @@ define <16 x double> @vfma_vf_v16f64_unmasked(<16 x double> %va, double %b, <16 x double> %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_v16f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement <16 x double> poison, double %b, i32 0 @@ -661,10 +661,10 @@ ; CHECK-NEXT: mul a1, a1, a3 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: vmv1r.v v1, v0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v0, v0, 2 ; CHECK-NEXT: addi a1, a2, 128 -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v24, (a1) ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: li a3, 24 @@ -777,7 +777,7 @@ ; CHECK-NEXT: mul a1, a1, a3 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: addi a1, a2, 128 -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v24, (a1) ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 @@ -801,7 +801,7 @@ ; CHECK-NEXT: addi a2, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill ; CHECK-NEXT: vle64.v v0, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 @@ -813,7 +813,7 @@ ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: li a4, 16 ; CHECK-NEXT: .LBB51_4: -; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma ; CHECK-NEXT: csrr a0, 
vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add a0, sp, a0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll @@ -19,7 +19,7 @@ define <2 x half> @vfneg_vv_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_v2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -43,7 +43,7 @@ define <4 x half> @vfneg_vv_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_v4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -67,7 +67,7 @@ define <8 x half> @vfneg_vv_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_v8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -91,7 +91,7 @@ define <16 x half> @vfneg_vv_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_v16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -115,7 +115,7 @@ define <2 x float> @vfneg_vv_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_v2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -139,7 +139,7 @@ define <4 x float> @vfneg_vv_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_v4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -163,7 +163,7 @@ define <8 x float> @vfneg_vv_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_v8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -187,7 +187,7 @@ define <16 x float> @vfneg_vv_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_v16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -211,7 +211,7 @@ define <2 x double> @vfneg_vv_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_v2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -235,7 +235,7 @@ 
define <4 x double> @vfneg_vv_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_v4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -259,7 +259,7 @@ define <8 x double> @vfneg_vv_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_v8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -283,7 +283,7 @@ define <15 x double> @vfneg_vv_v15f64_unmasked(<15 x double> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_v15f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <15 x i1> poison, i1 true, i32 0 @@ -307,7 +307,7 @@ define <16 x double> @vfneg_vv_v16f64_unmasked(<16 x double> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_v16f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -323,7 +323,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: addi a2, a0, -16 ; CHECK-NEXT: vslidedown.vi v0, v0, 2 ; CHECK-NEXT: bltu a0, a2, .LBB26_2 @@ -354,14 +354,14 @@ ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a2, a1 ; CHECK-NEXT: .LBB27_2: -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: vfneg.v v16, v16 ; CHECK-NEXT: bltu a0, a1, .LBB27_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: .LBB27_4: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <32 x i1> poison, i1 true, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmacc-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmacc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmacc-vp.ll @@ -28,7 +28,7 @@ define <2 x half> @vfnmacc_vv_v2f16_unmasked(<2 x half> %a, <2 x half> %b, <2 x half> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_v2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfnmacc.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -80,7 +80,7 @@ define <2 x half> @vfnmacc_vf_v2f16_unmasked(<2 x half> %a, half %b, <2 x half> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vf_v2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfnmacc.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -171,7 +171,7 @@ define <4 x half> @vfnmacc_vv_v4f16_unmasked(<4 x half> %a, <4 x half> %b, <4 x half> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_v4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: 
vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfnmacc.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -223,7 +223,7 @@ define <4 x half> @vfnmacc_vf_v4f16_unmasked(<4 x half> %a, half %b, <4 x half> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vf_v4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfnmacc.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -314,7 +314,7 @@ define <8 x half> @vfnmacc_vv_v8f16_unmasked(<8 x half> %a, <8 x half> %b, <8 x half> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_v8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfnmacc.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -366,7 +366,7 @@ define <8 x half> @vfnmacc_vf_v8f16_unmasked(<8 x half> %a, half %b, <8 x half> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vf_v8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfnmacc.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -457,7 +457,7 @@ define <16 x half> @vfnmacc_vv_v16f16_unmasked(<16 x half> %a, <16 x half> %b, <16 x half> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_v16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfnmacc.vv v12, v8, v10 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -509,7 +509,7 @@ define <16 x half> @vfnmacc_vf_v16f16_unmasked(<16 x half> %a, half %b, <16 x half> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vf_v16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfnmacc.vf v10, fa0, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -600,7 +600,7 @@ define <32 x half> @vfnmacc_vv_v32f16_unmasked(<32 x half> %a, <32 x half> %b, <32 x half> %c, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_v32f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfnmacc.vv v16, v8, v12 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -652,7 +652,7 @@ define <32 x half> @vfnmacc_vf_v32f16_unmasked(<32 x half> %a, half %b, <32 x half> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vf_v32f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfnmacc.vf v12, fa0, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -743,7 +743,7 @@ define <2 x float> @vfnmacc_vv_v2f32_unmasked(<2 x float> %a, <2 x float> %b, <2 x float> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_v2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfnmacc.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -795,7 +795,7 @@ define <2 x float> @vfnmacc_vf_v2f32_unmasked(<2 x float> %a, float %b, <2 x float> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vf_v2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfnmacc.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -886,7 +886,7 
@@ define <4 x float> @vfnmacc_vv_v4f32_unmasked(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_v4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfnmacc.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -938,7 +938,7 @@ define <4 x float> @vfnmacc_vf_v4f32_unmasked(<4 x float> %a, float %b, <4 x float> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vf_v4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfnmacc.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1029,7 +1029,7 @@ define <8 x float> @vfnmacc_vv_v8f32_unmasked(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_v8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfnmacc.vv v12, v8, v10 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -1081,7 +1081,7 @@ define <8 x float> @vfnmacc_vf_v8f32_unmasked(<8 x float> %a, float %b, <8 x float> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vf_v8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfnmacc.vf v10, fa0, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1172,7 +1172,7 @@ define <16 x float> @vfnmacc_vv_v16f32_unmasked(<16 x float> %a, <16 x float> %b, <16 x float> %c, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_v16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfnmacc.vv v16, v8, v12 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -1224,7 +1224,7 @@ define <16 x float> @vfnmacc_vf_v16f32_unmasked(<16 x float> %a, float %b, <16 x float> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vf_v16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfnmacc.vf v12, fa0, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1315,7 +1315,7 @@ define <2 x double> @vfnmacc_vv_v2f64_unmasked(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_v2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfnmacc.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1367,7 +1367,7 @@ define <2 x double> @vfnmacc_vf_v2f64_unmasked(<2 x double> %a, double %b, <2 x double> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vf_v2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfnmacc.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1458,7 +1458,7 @@ define <4 x double> @vfnmacc_vv_v4f64_unmasked(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_v4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfnmacc.vv v12, v8, v10 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -1510,7 +1510,7 @@ define <4 x double> @vfnmacc_vf_v4f64_unmasked(<4 x double> %a, double %b, 
<4 x double> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vf_v4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfnmacc.vf v10, fa0, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1601,7 +1601,7 @@ define <8 x double> @vfnmacc_vv_v8f64_unmasked(<8 x double> %a, <8 x double> %b, <8 x double> %c, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_v8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfnmacc.vv v16, v8, v12 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -1653,7 +1653,7 @@ define <8 x double> @vfnmacc_vf_v8f64_unmasked(<8 x double> %a, double %b, <8 x double> %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vf_v8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfnmacc.vf v12, fa0, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmsac-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmsac-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmsac-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmsac-vp.ll @@ -27,7 +27,7 @@ define @vfnmsac_vv_nxv1f16_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfnmsac.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -76,7 +76,7 @@ define @vfnmsac_vf_nxv1f16_unmasked( %a, half %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vf_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfnmsac.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -162,7 +162,7 @@ define @vfnmsac_vv_nxv2f16_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfnmsac.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -211,7 +211,7 @@ define @vfnmsac_vf_nxv2f16_unmasked( %a, half %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vf_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfnmsac.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -297,7 +297,7 @@ define @vfnmsac_vv_nxv4f16_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfnmsac.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -346,7 +346,7 @@ define @vfnmsac_vf_nxv4f16_unmasked( %a, half %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vf_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfnmsac.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -432,7 +432,7 @@ define @vfnmsac_vv_nxv8f16_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; 
CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfnmsac.vv v12, v8, v10 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -481,7 +481,7 @@ define @vfnmsac_vf_nxv8f16_unmasked( %a, half %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vf_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfnmsac.vf v10, fa0, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -567,7 +567,7 @@ define @vfnmsac_vv_nxv16f16_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfnmsac.vv v16, v8, v12 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -616,7 +616,7 @@ define @vfnmsac_vf_nxv16f16_unmasked( %a, half %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vf_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfnmsac.vf v12, fa0, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -704,7 +704,7 @@ ; CHECK-LABEL: vfnmsac_vv_nxv32f16_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, ma ; CHECK-NEXT: vfnmsac.vv v24, v8, v16 ; CHECK-NEXT: vmv8r.v v8, v24 ; CHECK-NEXT: ret @@ -753,7 +753,7 @@ define @vfnmsac_vf_nxv32f16_unmasked( %a, half %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vf_nxv32f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vfnmsac.vf v16, fa0, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -840,7 +840,7 @@ define @vfnmsac_vv_nxv1f32_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfnmsac.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -889,7 +889,7 @@ define @vfnmsac_vf_nxv1f32_unmasked( %a, float %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vf_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfnmsac.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -975,7 +975,7 @@ define @vfnmsac_vv_nxv2f32_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfnmsac.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1024,7 +1024,7 @@ define @vfnmsac_vf_nxv2f32_unmasked( %a, float %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vf_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfnmsac.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1110,7 +1110,7 @@ define @vfnmsac_vv_nxv4f32_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfnmsac.vv v12, v8, v10 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -1159,7 +1159,7 @@ define 
@vfnmsac_vf_nxv4f32_unmasked( %a, float %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vf_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfnmsac.vf v10, fa0, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1245,7 +1245,7 @@ define @vfnmsac_vv_nxv8f32_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfnmsac.vv v16, v8, v12 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -1294,7 +1294,7 @@ define @vfnmsac_vf_nxv8f32_unmasked( %a, float %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vf_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfnmsac.vf v12, fa0, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1382,7 +1382,7 @@ ; CHECK-LABEL: vfnmsac_vv_nxv16f32_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, ma ; CHECK-NEXT: vfnmsac.vv v24, v8, v16 ; CHECK-NEXT: vmv8r.v v8, v24 ; CHECK-NEXT: ret @@ -1431,7 +1431,7 @@ define @vfnmsac_vf_nxv16f32_unmasked( %a, float %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vf_nxv16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vfnmsac.vf v16, fa0, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1518,7 +1518,7 @@ define @vfnmsac_vv_nxv1f64_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfnmsac.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1567,7 +1567,7 @@ define @vfnmsac_vf_nxv1f64_unmasked( %a, double %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vf_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfnmsac.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1653,7 +1653,7 @@ define @vfnmsac_vv_nxv2f64_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfnmsac.vv v12, v8, v10 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -1702,7 +1702,7 @@ define @vfnmsac_vf_nxv2f64_unmasked( %a, double %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vf_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfnmsac.vf v10, fa0, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1788,7 +1788,7 @@ define @vfnmsac_vv_nxv4f64_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfnmsac.vv v16, v8, v12 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -1837,7 +1837,7 @@ define @vfnmsac_vf_nxv4f64_unmasked( %a, double %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vf_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, 
m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfnmsac.vf v12, fa0, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1925,7 +1925,7 @@ ; CHECK-LABEL: vfnmsac_vv_nxv8f64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, ma ; CHECK-NEXT: vfnmsac.vv v24, v8, v16 ; CHECK-NEXT: vmv8r.v v8, v24 ; CHECK-NEXT: ret @@ -1974,7 +1974,7 @@ define @vfnmsac_vf_nxv8f64_unmasked( %a, double %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vf_nxv8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vfnmsac.vf v16, fa0, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrdiv-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrdiv-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrdiv-vp.ll @@ -21,7 +21,7 @@ define <2 x half> @vfrdiv_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_v2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x half> poison, half %b, i32 0 @@ -49,7 +49,7 @@ define <4 x half> @vfrdiv_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_v4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x half> poison, half %b, i32 0 @@ -77,7 +77,7 @@ define <8 x half> @vfrdiv_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_v8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x half> poison, half %b, i32 0 @@ -105,7 +105,7 @@ define <16 x half> @vfrdiv_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_v16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x half> poison, half %b, i32 0 @@ -133,7 +133,7 @@ define <2 x float> @vfrdiv_vf_v2f32_unmasked(<2 x float> %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_v2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x float> poison, float %b, i32 0 @@ -161,7 +161,7 @@ define <4 x float> @vfrdiv_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_v4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x float> poison, float %b, i32 0 @@ -189,7 +189,7 @@ define <8 x float> @vfrdiv_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_v8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu 
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x float> poison, float %b, i32 0 @@ -217,7 +217,7 @@ define <16 x float> @vfrdiv_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_v16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x float> poison, float %b, i32 0 @@ -245,7 +245,7 @@ define <2 x double> @vfrdiv_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_v2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x double> poison, double %b, i32 0 @@ -273,7 +273,7 @@ define <4 x double> @vfrdiv_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_v4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x double> poison, double %b, i32 0 @@ -301,7 +301,7 @@ define <8 x double> @vfrdiv_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_v8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x double> poison, double %b, i32 0 @@ -329,7 +329,7 @@ define <16 x double> @vfrdiv_vf_v16f64_unmasked(<16 x double> %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_v16f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x double> poison, double %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrsub-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrsub-vp.ll @@ -21,7 +21,7 @@ define <2 x half> @vfrsub_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x half> poison, half %b, i32 0 @@ -49,7 +49,7 @@ define <4 x half> @vfrsub_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x half> poison, half %b, i32 0 @@ -77,7 +77,7 @@ define <8 x half> @vfrsub_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x half> poison, half %b, i32 0 @@ -105,7 +105,7 @@ define <16 x half> 
@vfrsub_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x half> poison, half %b, i32 0 @@ -133,7 +133,7 @@ define <2 x float> @vfrsub_vf_v2f32_unmasked(<2 x float> %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x float> poison, float %b, i32 0 @@ -161,7 +161,7 @@ define <4 x float> @vfrsub_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x float> poison, float %b, i32 0 @@ -189,7 +189,7 @@ define <8 x float> @vfrsub_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x float> poison, float %b, i32 0 @@ -217,7 +217,7 @@ define <16 x float> @vfrsub_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x float> poison, float %b, i32 0 @@ -245,7 +245,7 @@ define <2 x double> @vfrsub_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x double> poison, double %b, i32 0 @@ -273,7 +273,7 @@ define <4 x double> @vfrsub_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x double> poison, double %b, i32 0 @@ -301,7 +301,7 @@ define <8 x double> @vfrsub_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x double> poison, double %b, i32 0 @@ -329,7 +329,7 @@ define <16 x double> @vfrsub_vf_v16f64_unmasked(<16 x double> %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_v16f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x double> poison, double %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll 
b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll @@ -19,7 +19,7 @@ define <2 x half> @vfsqrt_vv_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -43,7 +43,7 @@ define <4 x half> @vfsqrt_vv_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -67,7 +67,7 @@ define <8 x half> @vfsqrt_vv_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -91,7 +91,7 @@ define <16 x half> @vfsqrt_vv_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -115,7 +115,7 @@ define <2 x float> @vfsqrt_vv_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -139,7 +139,7 @@ define <4 x float> @vfsqrt_vv_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -163,7 +163,7 @@ define <8 x float> @vfsqrt_vv_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -187,7 +187,7 @@ define <16 x float> @vfsqrt_vv_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -211,7 +211,7 @@ define <2 x double> @vfsqrt_vv_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -235,7 +235,7 @@ define <4 x double> @vfsqrt_vv_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) { ; CHECK-LABEL: 
vfsqrt_vv_v4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -259,7 +259,7 @@ define <8 x double> @vfsqrt_vv_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -283,7 +283,7 @@ define <15 x double> @vfsqrt_vv_v15f64_unmasked(<15 x double> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v15f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <15 x i1> poison, i1 true, i32 0 @@ -307,7 +307,7 @@ define <16 x double> @vfsqrt_vv_v16f64_unmasked(<16 x double> %va, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_v16f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -323,7 +323,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: addi a2, a0, -16 ; CHECK-NEXT: vslidedown.vi v0, v0, 2 ; CHECK-NEXT: bltu a0, a2, .LBB26_2 @@ -354,14 +354,14 @@ ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a2, a1 ; CHECK-NEXT: .LBB27_2: -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: vfsqrt.v v16, v16 ; CHECK-NEXT: bltu a0, a1, .LBB27_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: .LBB27_4: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %head = insertelement <32 x i1> poison, i1 true, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll @@ -19,7 +19,7 @@ define <2 x half> @vfsub_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -43,7 +43,7 @@ define <2 x half> @vfsub_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_v2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x half> poison, half %b, i32 0 @@ -81,7 +81,7 @@ define <4 x half> @vfsub_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 
true, i32 0 @@ -105,7 +105,7 @@ define <4 x half> @vfsub_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_v4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x half> poison, half %b, i32 0 @@ -131,7 +131,7 @@ define <8 x half> @vfsub_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -155,7 +155,7 @@ define <8 x half> @vfsub_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_v8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x half> poison, half %b, i32 0 @@ -181,7 +181,7 @@ define <16 x half> @vfsub_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -205,7 +205,7 @@ define <16 x half> @vfsub_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_v16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x half> poison, half %b, i32 0 @@ -231,7 +231,7 @@ define <2 x float> @vfsub_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -255,7 +255,7 @@ define <2 x float> @vfsub_vf_v2f32_unmasked(<2 x float> %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_v2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x float> poison, float %b, i32 0 @@ -281,7 +281,7 @@ define <4 x float> @vfsub_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -305,7 +305,7 @@ define <4 x float> @vfsub_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_v4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x float> poison, float %b, i32 0 @@ -331,7 +331,7 @@ define <8 x float> @vfsub_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %b, i32 zeroext %evl) { ; 
CHECK-LABEL: vfsub_vv_v8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -355,7 +355,7 @@ define <8 x float> @vfsub_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_v8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x float> poison, float %b, i32 0 @@ -381,7 +381,7 @@ define <16 x float> @vfsub_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -405,7 +405,7 @@ define <16 x float> @vfsub_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_v16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x float> poison, float %b, i32 0 @@ -431,7 +431,7 @@ define <2 x double> @vfsub_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -455,7 +455,7 @@ define <2 x double> @vfsub_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_v2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x double> poison, double %b, i32 0 @@ -481,7 +481,7 @@ define <4 x double> @vfsub_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -505,7 +505,7 @@ define <4 x double> @vfsub_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_v4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x double> poison, double %b, i32 0 @@ -531,7 +531,7 @@ define <8 x double> @vfsub_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -555,7 +555,7 @@ define <8 x double> @vfsub_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_v8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, 
ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x double> poison, double %b, i32 0 @@ -581,7 +581,7 @@ define <16 x double> @vfsub_vv_v16f64_unmasked(<16 x double> %va, <16 x double> %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_v16f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -605,7 +605,7 @@ define <16 x double> @vfsub_vf_v16f64_unmasked(<16 x double> %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_v16f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x double> poison, double %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll @@ -7,7 +7,7 @@ define <2 x float> @vfwadd_v2f16(<2 x half> *%x, <2 x half> *%y) { ; CHECK-LABEL: vfwadd_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vle16.v v10, (a1) ; CHECK-NEXT: vfwadd.vv v8, v9, v10 @@ -23,7 +23,7 @@ define <4 x float> @vfwadd_v4f16(<4 x half> *%x, <4 x half> *%y) { ; CHECK-LABEL: vfwadd_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vle16.v v10, (a1) ; CHECK-NEXT: vfwadd.vv v8, v9, v10 @@ -39,7 +39,7 @@ define <8 x float> @vfwadd_v8f16(<8 x half> *%x, <8 x half> *%y) { ; CHECK-LABEL: vfwadd_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v10, (a0) ; CHECK-NEXT: vle16.v v11, (a1) ; CHECK-NEXT: vfwadd.vv v8, v10, v11 @@ -55,7 +55,7 @@ define <16 x float> @vfwadd_v16f16(<16 x half> *%x, <16 x half> *%y) { ; CHECK-LABEL: vfwadd_v16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v12, (a0) ; CHECK-NEXT: vle16.v v14, (a1) ; CHECK-NEXT: vfwadd.vv v8, v12, v14 @@ -72,7 +72,7 @@ ; CHECK-LABEL: vfwadd_v32f16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vle16.v v20, (a1) ; CHECK-NEXT: vfwadd.vv v8, v16, v20 @@ -94,16 +94,16 @@ ; CHECK-NEXT: slli a2, a2, 3 ; CHECK-NEXT: sub sp, sp, a2 ; CHECK-NEXT: li a2, 64 -; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vle16.v v24, (a1) ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v16, a0 ; CHECK-NEXT: addi a1, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: vslidedown.vx v0, v24, a0 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfwadd.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v24, 
(a0) # Unknown-size Folded Reload @@ -124,7 +124,7 @@ define <2 x double> @vfwadd_v2f32(<2 x float> *%x, <2 x float> *%y) { ; CHECK-LABEL: vfwadd_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vle32.v v10, (a1) ; CHECK-NEXT: vfwadd.vv v8, v9, v10 @@ -140,7 +140,7 @@ define <4 x double> @vfwadd_v4f32(<4 x float> *%x, <4 x float> *%y) { ; CHECK-LABEL: vfwadd_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v10, (a0) ; CHECK-NEXT: vle32.v v11, (a1) ; CHECK-NEXT: vfwadd.vv v8, v10, v11 @@ -156,7 +156,7 @@ define <8 x double> @vfwadd_v8f32(<8 x float> *%x, <8 x float> *%y) { ; CHECK-LABEL: vfwadd_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v12, (a0) ; CHECK-NEXT: vle32.v v14, (a1) ; CHECK-NEXT: vfwadd.vv v8, v12, v14 @@ -172,7 +172,7 @@ define <16 x double> @vfwadd_v16f32(<16 x float> *%x, <16 x float> *%y) { ; CHECK-LABEL: vfwadd_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vle32.v v20, (a1) ; CHECK-NEXT: vfwadd.vv v8, v16, v20 @@ -194,15 +194,15 @@ ; CHECK-NEXT: slli a2, a2, 3 ; CHECK-NEXT: sub sp, sp, a2 ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vle32.v v24, (a1) -; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v16, 16 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill ; CHECK-NEXT: vslidedown.vi v0, v24, 16 -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vfwadd.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload @@ -223,7 +223,7 @@ define <2 x float> @vfwadd_vf_v2f16(<2 x half>* %x, half %y) { ; CHECK-LABEL: vfwadd_vf_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vfwadd.vf v8, v9, fa0 ; CHECK-NEXT: ret @@ -239,7 +239,7 @@ define <4 x float> @vfwadd_vf_v4f16(<4 x half>* %x, half %y) { ; CHECK-LABEL: vfwadd_vf_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vfwadd.vf v8, v9, fa0 ; CHECK-NEXT: ret @@ -255,7 +255,7 @@ define <8 x float> @vfwadd_vf_v8f16(<8 x half>* %x, half %y) { ; CHECK-LABEL: vfwadd_vf_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v10, (a0) ; CHECK-NEXT: vfwadd.vf v8, v10, fa0 ; CHECK-NEXT: ret @@ -271,7 +271,7 @@ define <16 x float> @vfwadd_vf_v16f16(<16 x half>* %x, half %y) { ; CHECK-LABEL: vfwadd_vf_v16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v12, (a0) ; CHECK-NEXT: vfwadd.vf v8, v12, fa0 ; CHECK-NEXT: ret @@ -288,7 +288,7 @@ ; CHECK-LABEL: vfwadd_vf_v32f16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, 
e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vfwadd.vf v8, v16, fa0 ; CHECK-NEXT: ret @@ -304,7 +304,7 @@ define <2 x double> @vfwadd_vf_v2f32(<2 x float>* %x, float %y) { ; CHECK-LABEL: vfwadd_vf_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vfwadd.vf v8, v9, fa0 ; CHECK-NEXT: ret @@ -320,7 +320,7 @@ define <4 x double> @vfwadd_vf_v4f32(<4 x float>* %x, float %y) { ; CHECK-LABEL: vfwadd_vf_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v10, (a0) ; CHECK-NEXT: vfwadd.vf v8, v10, fa0 ; CHECK-NEXT: ret @@ -336,7 +336,7 @@ define <8 x double> @vfwadd_vf_v8f32(<8 x float>* %x, float %y) { ; CHECK-LABEL: vfwadd_vf_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v12, (a0) ; CHECK-NEXT: vfwadd.vf v8, v12, fa0 ; CHECK-NEXT: ret @@ -352,7 +352,7 @@ define <16 x double> @vfwadd_vf_v16f32(<16 x float>* %x, float %y) { ; CHECK-LABEL: vfwadd_vf_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vfwadd.vf v8, v16, fa0 ; CHECK-NEXT: ret @@ -369,11 +369,11 @@ ; CHECK-LABEL: vfwadd_vf_v32f32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v24, (a0) -; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v0, v24, 16 -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vfmv.v.f v8, fa0 ; CHECK-NEXT: vfwcvt.f.f.v v16, v8 ; CHECK-NEXT: vfwadd.wv v8, v16, v24 @@ -391,7 +391,7 @@ define <2 x float> @vfwadd_wv_v2f16(<2 x float> *%x, <2 x half> *%y) { ; CHECK-LABEL: vfwadd_wv_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vfwadd.wv v8, v8, v9 @@ -406,7 +406,7 @@ define <4 x float> @vfwadd_wv_v4f16(<4 x float> *%x, <4 x half> *%y) { ; CHECK-LABEL: vfwadd_wv_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vfwadd.wv v8, v8, v9 @@ -421,7 +421,7 @@ define <8 x float> @vfwadd_wv_v8f16(<8 x float> *%x, <8 x half> *%y) { ; CHECK-LABEL: vfwadd_wv_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle16.v v10, (a1) ; CHECK-NEXT: vfwadd.wv v8, v8, v10 @@ -436,7 +436,7 @@ define <16 x float> @vfwadd_wv_v16f16(<16 x float> *%x, <16 x half> *%y) { ; CHECK-LABEL: vfwadd_wv_v16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle16.v v12, (a1) ; CHECK-NEXT: vfwadd.wv v8, v8, v12 @@ -452,7 +452,7 @@ ; CHECK-LABEL: vfwadd_wv_v32f16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, 
m4, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle16.v v16, (a1) ; CHECK-NEXT: vfwadd.wv v8, v8, v16 @@ -467,7 +467,7 @@ define <2 x double> @vfwadd_wv_v2f32(<2 x double> *%x, <2 x float> *%y) { ; CHECK-LABEL: vfwadd_wv_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vfwadd.wv v8, v8, v9 @@ -482,7 +482,7 @@ define <4 x double> @vfwadd_wv_v4f32(<4 x double> *%x, <4 x float> *%y) { ; CHECK-LABEL: vfwadd_wv_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle32.v v10, (a1) ; CHECK-NEXT: vfwadd.wv v8, v8, v10 @@ -497,7 +497,7 @@ define <8 x double> @vfwadd_wv_v8f32(<8 x double> *%x, <8 x float> *%y) { ; CHECK-LABEL: vfwadd_wv_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle32.v v12, (a1) ; CHECK-NEXT: vfwadd.wv v8, v8, v12 @@ -512,7 +512,7 @@ define <16 x double> @vfwadd_wv_v16f32(<16 x double> *%x, <16 x float> *%y) { ; CHECK-LABEL: vfwadd_wv_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle32.v v16, (a1) ; CHECK-NEXT: vfwadd.wv v8, v8, v16 @@ -527,7 +527,7 @@ define <2 x float> @vfwadd_wf_v2f16(<2 x float>* %x, half %y) { ; CHECK-LABEL: vfwadd_wf_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret @@ -542,7 +542,7 @@ define <4 x float> @vfwadd_wf_v4f16(<4 x float>* %x, half %y) { ; CHECK-LABEL: vfwadd_wf_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret @@ -557,7 +557,7 @@ define <8 x float> @vfwadd_wf_v8f16(<8 x float>* %x, half %y) { ; CHECK-LABEL: vfwadd_wf_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret @@ -572,7 +572,7 @@ define <16 x float> @vfwadd_wf_v16f16(<16 x float>* %x, half %y) { ; CHECK-LABEL: vfwadd_wf_v16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret @@ -587,7 +587,7 @@ define <2 x double> @vfwadd_wf_v2f32(<2 x double>* %x, float %y) { ; CHECK-LABEL: vfwadd_wf_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret @@ -602,7 +602,7 @@ define <4 x double> @vfwadd_wf_v4f32(<4 x double>* %x, float %y) { ; CHECK-LABEL: vfwadd_wf_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret @@ -617,7 +617,7 @@ define <8 x double> @vfwadd_wf_v8f32(<8 x double>* %x, float %y) { ; CHECK-LABEL: vfwadd_wf_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: 
vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret @@ -632,7 +632,7 @@ define <16 x double> @vfwadd_wf_v16f32(<16 x double>* %x, float %y) { ; CHECK-LABEL: vfwadd_wf_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmacc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmacc.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmacc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmacc.ll @@ -9,7 +9,7 @@ define <1 x float> @vfwmacc_vv_nxv1f32(<1 x float> %va, <1 x half> %vb, <1 x half> %vc) { ; CHECK-LABEL: vfwmacc_vv_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vfwmacc.vv v8, v9, v10 ; CHECK-NEXT: ret %vd = fpext <1 x half> %vb to <1 x float> @@ -21,7 +21,7 @@ define <1 x float> @vfwmacc_vf_nxv1f32(<1 x float> %va, <1 x half> %vb, half %c) { ; CHECK-LABEL: vfwmacc_vf_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vfwmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement <1 x half> poison, half %c, i32 0 @@ -35,7 +35,7 @@ define <1 x float> @vfwnmacc_vv_nxv1f32(<1 x float> %va, <1 x half> %vb, <1 x half> %vc) { ; CHECK-LABEL: vfwnmacc_vv_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vfwnmacc.vv v8, v9, v10 ; CHECK-NEXT: ret %vd = fpext <1 x half> %vb to <1 x float> @@ -49,7 +49,7 @@ define <1 x float> @vfwnmacc_vf_nxv1f32(<1 x float> %va, <1 x half> %vb, half %c) { ; CHECK-LABEL: vfwnmacc_vf_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement <1 x half> poison, half %c, i32 0 @@ -65,7 +65,7 @@ define <1 x float> @vfwnmacc_fv_nxv1f32(<1 x float> %va, <1 x half> %vb, half %c) { ; CHECK-LABEL: vfwnmacc_fv_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement <1 x half> poison, half %c, i32 0 @@ -81,7 +81,7 @@ define <1 x float> @vfwmsac_vv_nxv1f32(<1 x float> %va, <1 x half> %vb, <1 x half> %vc) { ; CHECK-LABEL: vfwmsac_vv_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vfwmsac.vv v8, v9, v10 ; CHECK-NEXT: ret %vd = fpext <1 x half> %vb to <1 x float> @@ -94,7 +94,7 @@ define <1 x float> @vfwmsac_vf_nxv1f32(<1 x float> %va, <1 x half> %vb, half %c) { ; CHECK-LABEL: vfwmsac_vf_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vfwmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement <1 x half> poison, half %c, i32 0 @@ -109,7 +109,7 @@ define <1 x float> @vfwnmsac_vv_nxv1f32(<1 x float> %va, <1 x half> %vb, <1 x half> %vc) { ; CHECK-LABEL: vfwnmsac_vv_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: 
vfwnmsac.vv v8, v9, v10 ; CHECK-NEXT: ret %vd = fpext <1 x half> %vb to <1 x float> @@ -122,7 +122,7 @@ define <1 x float> @vfwnmsac_vf_nxv1f32(<1 x float> %va, <1 x half> %vb, half %c) { ; CHECK-LABEL: vfwnmsac_vf_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement <1 x half> poison, half %c, i32 0 @@ -137,7 +137,7 @@ define <1 x float> @vfwnmsac_fv_nxv1f32(<1 x float> %va, <1 x half> %vb, half %c) { ; CHECK-LABEL: vfwnmsac_fv_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement <1 x half> poison, half %c, i32 0 @@ -154,7 +154,7 @@ define <2 x float> @vfwmacc_vv_nxv2f32(<2 x float> %va, <2 x half> %vb, <2 x half> %vc) { ; CHECK-LABEL: vfwmacc_vv_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vfwmacc.vv v8, v9, v10 ; CHECK-NEXT: ret %vd = fpext <2 x half> %vb to <2 x float> @@ -166,7 +166,7 @@ define <2 x float> @vfwmacc_vf_nxv2f32(<2 x float> %va, <2 x half> %vb, half %c) { ; CHECK-LABEL: vfwmacc_vf_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vfwmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement <2 x half> poison, half %c, i32 0 @@ -180,7 +180,7 @@ define <2 x float> @vfwnmacc_vv_nxv2f32(<2 x float> %va, <2 x half> %vb, <2 x half> %vc) { ; CHECK-LABEL: vfwnmacc_vv_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vfwnmacc.vv v8, v9, v10 ; CHECK-NEXT: ret %vd = fpext <2 x half> %vb to <2 x float> @@ -194,7 +194,7 @@ define <2 x float> @vfwnmacc_vf_nxv2f32(<2 x float> %va, <2 x half> %vb, half %c) { ; CHECK-LABEL: vfwnmacc_vf_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement <2 x half> poison, half %c, i32 0 @@ -210,7 +210,7 @@ define <2 x float> @vfwnmacc_fv_nxv2f32(<2 x float> %va, <2 x half> %vb, half %c) { ; CHECK-LABEL: vfwnmacc_fv_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement <2 x half> poison, half %c, i32 0 @@ -226,7 +226,7 @@ define <2 x float> @vfwmsac_vv_nxv2f32(<2 x float> %va, <2 x half> %vb, <2 x half> %vc) { ; CHECK-LABEL: vfwmsac_vv_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vfwmsac.vv v8, v9, v10 ; CHECK-NEXT: ret %vd = fpext <2 x half> %vb to <2 x float> @@ -239,7 +239,7 @@ define <2 x float> @vfwmsac_vf_nxv2f32(<2 x float> %va, <2 x half> %vb, half %c) { ; CHECK-LABEL: vfwmsac_vf_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vfwmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement <2 x half> poison, half %c, i32 0 @@ -254,7 +254,7 @@ define <2 x float> @vfwnmsac_vv_nxv2f32(<2 x float> %va, <2 x half> %vb, <2 x half> %vc) { ; CHECK-LABEL: vfwnmsac_vv_nxv2f32: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vfwnmsac.vv v8, v9, v10 ; CHECK-NEXT: ret %vd = fpext <2 x half> %vb to <2 x float> @@ -267,7 +267,7 @@ define <2 x float> @vfwnmsac_vf_nxv2f32(<2 x float> %va, <2 x half> %vb, half %c) { ; CHECK-LABEL: vfwnmsac_vf_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement <2 x half> poison, half %c, i32 0 @@ -282,7 +282,7 @@ define <2 x float> @vfwnmsac_fv_nxv2f32(<2 x float> %va, <2 x half> %vb, half %c) { ; CHECK-LABEL: vfwnmsac_fv_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement <2 x half> poison, half %c, i32 0 @@ -300,7 +300,7 @@ define <4 x float> @vfwmacc_vv_nxv4f32(<4 x float> %va, <4 x half> %vb, <4 x half> %vc) { ; CHECK-LABEL: vfwmacc_vv_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vfwmacc.vv v8, v9, v10 ; CHECK-NEXT: ret %vd = fpext <4 x half> %vb to <4 x float> @@ -312,7 +312,7 @@ define <4 x float> @vfwmacc_vf_nxv4f32(<4 x float> %va, <4 x half> %vb, half %c) { ; CHECK-LABEL: vfwmacc_vf_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vfwmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement <4 x half> poison, half %c, i32 0 @@ -326,7 +326,7 @@ define <4 x float> @vfwnmacc_vv_nxv4f32(<4 x float> %va, <4 x half> %vb, <4 x half> %vc) { ; CHECK-LABEL: vfwnmacc_vv_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vfwnmacc.vv v8, v9, v10 ; CHECK-NEXT: ret %vd = fpext <4 x half> %vb to <4 x float> @@ -340,7 +340,7 @@ define <4 x float> @vfwnmacc_vf_nxv4f32(<4 x float> %va, <4 x half> %vb, half %c) { ; CHECK-LABEL: vfwnmacc_vf_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement <4 x half> poison, half %c, i32 0 @@ -356,7 +356,7 @@ define <4 x float> @vfwnmacc_fv_nxv4f32(<4 x float> %va, <4 x half> %vb, half %c) { ; CHECK-LABEL: vfwnmacc_fv_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement <4 x half> poison, half %c, i32 0 @@ -372,7 +372,7 @@ define <4 x float> @vfwmsac_vv_nxv4f32(<4 x float> %va, <4 x half> %vb, <4 x half> %vc) { ; CHECK-LABEL: vfwmsac_vv_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vfwmsac.vv v8, v9, v10 ; CHECK-NEXT: ret %vd = fpext <4 x half> %vb to <4 x float> @@ -385,7 +385,7 @@ define <4 x float> @vfwmsac_vf_nxv4f32(<4 x float> %va, <4 x half> %vb, half %c) { ; CHECK-LABEL: vfwmsac_vf_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vfwmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement <4 x half> poison, half %c, i32 0 @@ -400,7 +400,7 @@ define <4 x float> @vfwnmsac_vv_nxv4f32(<4 
x float> %va, <4 x half> %vb, <4 x half> %vc) { ; CHECK-LABEL: vfwnmsac_vv_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vfwnmsac.vv v8, v9, v10 ; CHECK-NEXT: ret %vd = fpext <4 x half> %vb to <4 x float> @@ -413,7 +413,7 @@ define <4 x float> @vfwnmsac_vf_nxv4f32(<4 x float> %va, <4 x half> %vb, half %c) { ; CHECK-LABEL: vfwnmsac_vf_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement <4 x half> poison, half %c, i32 0 @@ -428,7 +428,7 @@ define <4 x float> @vfwnmsac_fv_nxv4f32(<4 x float> %va, <4 x half> %vb, half %c) { ; CHECK-LABEL: vfwnmsac_fv_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement <4 x half> poison, half %c, i32 0 @@ -445,7 +445,7 @@ define <8 x float> @vfwmacc_vv_nxv8f32(<8 x float> %va, <8 x half> %vb, <8 x half> %vc) { ; CHECK-LABEL: vfwmacc_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vfwmacc.vv v8, v10, v11 ; CHECK-NEXT: ret %vd = fpext <8 x half> %vb to <8 x float> @@ -457,7 +457,7 @@ define <8 x float> @vfwmacc_vf_nxv8f32(<8 x float> %va, <8 x half> %vb, half %c) { ; CHECK-LABEL: vfwmacc_vf_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vfwmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret %head = insertelement <8 x half> poison, half %c, i32 0 @@ -471,7 +471,7 @@ define <8 x float> @vfwnmacc_vv_nxv8f32(<8 x float> %va, <8 x half> %vb, <8 x half> %vc) { ; CHECK-LABEL: vfwnmacc_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vfwnmacc.vv v8, v10, v11 ; CHECK-NEXT: ret %vd = fpext <8 x half> %vb to <8 x float> @@ -485,7 +485,7 @@ define <8 x float> @vfwnmacc_vf_nxv8f32(<8 x float> %va, <8 x half> %vb, half %c) { ; CHECK-LABEL: vfwnmacc_vf_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret %head = insertelement <8 x half> poison, half %c, i32 0 @@ -501,7 +501,7 @@ define <8 x float> @vfwnmacc_fv_nxv8f32(<8 x float> %va, <8 x half> %vb, half %c) { ; CHECK-LABEL: vfwnmacc_fv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret %head = insertelement <8 x half> poison, half %c, i32 0 @@ -517,7 +517,7 @@ define <8 x float> @vfwmsac_vv_nxv8f32(<8 x float> %va, <8 x half> %vb, <8 x half> %vc) { ; CHECK-LABEL: vfwmsac_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vfwmsac.vv v8, v10, v11 ; CHECK-NEXT: ret %vd = fpext <8 x half> %vb to <8 x float> @@ -530,7 +530,7 @@ define <8 x float> @vfwmsac_vf_nxv8f32(<8 x float> %va, <8 x half> %vb, half %c) { ; CHECK-LABEL: vfwmsac_vf_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vfwmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret %head = 
insertelement <8 x half> poison, half %c, i32 0 @@ -545,7 +545,7 @@ define <8 x float> @vfwnmsac_vv_nxv8f32(<8 x float> %va, <8 x half> %vb, <8 x half> %vc) { ; CHECK-LABEL: vfwnmsac_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vfwnmsac.vv v8, v10, v11 ; CHECK-NEXT: ret %vd = fpext <8 x half> %vb to <8 x float> @@ -558,7 +558,7 @@ define <8 x float> @vfwnmsac_vf_nxv8f32(<8 x float> %va, <8 x half> %vb, half %c) { ; CHECK-LABEL: vfwnmsac_vf_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret %head = insertelement <8 x half> poison, half %c, i32 0 @@ -573,7 +573,7 @@ define <8 x float> @vfwnmsac_fv_nxv8f32(<8 x float> %va, <8 x half> %vb, half %c) { ; CHECK-LABEL: vfwnmsac_fv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret %head = insertelement <8 x half> poison, half %c, i32 0 @@ -590,7 +590,7 @@ define <16 x float> @vfwmacc_vv_nxv16f32(<16 x float> %va, <16 x half> %vb, <16 x half> %vc) { ; CHECK-LABEL: vfwmacc_vv_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vfwmacc.vv v8, v12, v14 ; CHECK-NEXT: ret %vd = fpext <16 x half> %vb to <16 x float> @@ -602,7 +602,7 @@ define <16 x float> @vfwmacc_vf_nxv16f32(<16 x float> %va, <16 x half> %vb, half %c) { ; CHECK-LABEL: vfwmacc_vf_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vfwmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret %head = insertelement <16 x half> poison, half %c, i32 0 @@ -616,7 +616,7 @@ define <16 x float> @vfwnmacc_vv_nxv16f32(<16 x float> %va, <16 x half> %vb, <16 x half> %vc) { ; CHECK-LABEL: vfwnmacc_vv_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vfwnmacc.vv v8, v12, v14 ; CHECK-NEXT: ret %vd = fpext <16 x half> %vb to <16 x float> @@ -630,7 +630,7 @@ define <16 x float> @vfwnmacc_vf_nxv16f32(<16 x float> %va, <16 x half> %vb, half %c) { ; CHECK-LABEL: vfwnmacc_vf_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret %head = insertelement <16 x half> poison, half %c, i32 0 @@ -646,7 +646,7 @@ define <16 x float> @vfwnmacc_fv_nxv16f32(<16 x float> %va, <16 x half> %vb, half %c) { ; CHECK-LABEL: vfwnmacc_fv_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret %head = insertelement <16 x half> poison, half %c, i32 0 @@ -662,7 +662,7 @@ define <16 x float> @vfwmsac_vv_nxv16f32(<16 x float> %va, <16 x half> %vb, <16 x half> %vc) { ; CHECK-LABEL: vfwmsac_vv_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vfwmsac.vv v8, v12, v14 ; CHECK-NEXT: ret %vd = fpext <16 x half> %vb to <16 x float> @@ -675,7 +675,7 @@ define <16 x float> @vfwmsac_vf_nxv16f32(<16 x float> %va, <16 x half> %vb, half %c) { ; CHECK-LABEL: vfwmsac_vf_nxv16f32: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vfwmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret %head = insertelement <16 x half> poison, half %c, i32 0 @@ -690,7 +690,7 @@ define <16 x float> @vfwnmsac_vv_nxv16f32(<16 x float> %va, <16 x half> %vb, <16 x half> %vc) { ; CHECK-LABEL: vfwnmsac_vv_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vfwnmsac.vv v8, v12, v14 ; CHECK-NEXT: ret %vd = fpext <16 x half> %vb to <16 x float> @@ -703,7 +703,7 @@ define <16 x float> @vfwnmsac_vf_nxv16f32(<16 x float> %va, <16 x half> %vb, half %c) { ; CHECK-LABEL: vfwnmsac_vf_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret %head = insertelement <16 x half> poison, half %c, i32 0 @@ -718,7 +718,7 @@ define <16 x float> @vfwnmsac_fv_nxv16f32(<16 x float> %va, <16 x half> %vb, half %c) { ; CHECK-LABEL: vfwnmsac_fv_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret %head = insertelement <16 x half> poison, half %c, i32 0 @@ -735,7 +735,7 @@ define <1 x double> @vfwmacc_vv_nxv1f64(<1 x double> %va, <1 x float> %vb, <1 x float> %vc) { ; CHECK-LABEL: vfwmacc_vv_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vfwmacc.vv v8, v9, v10 ; CHECK-NEXT: ret %vd = fpext <1 x float> %vb to <1 x double> @@ -747,7 +747,7 @@ define <1 x double> @vfwmacc_vf_nxv1f64(<1 x double> %va, <1 x float> %vb, float %c) { ; CHECK-LABEL: vfwmacc_vf_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vfwmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement <1 x float> poison, float %c, i32 0 @@ -761,7 +761,7 @@ define <1 x double> @vfwnmacc_vv_nxv1f64(<1 x double> %va, <1 x float> %vb, <1 x float> %vc) { ; CHECK-LABEL: vfwnmacc_vv_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vfwnmacc.vv v8, v9, v10 ; CHECK-NEXT: ret %vd = fpext <1 x float> %vb to <1 x double> @@ -775,7 +775,7 @@ define <1 x double> @vfwnmacc_vf_nxv1f64(<1 x double> %va, <1 x float> %vb, float %c) { ; CHECK-LABEL: vfwnmacc_vf_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement <1 x float> poison, float %c, i32 0 @@ -791,7 +791,7 @@ define <1 x double> @vfwnmacc_fv_nxv1f64(<1 x double> %va, <1 x float> %vb, float %c) { ; CHECK-LABEL: vfwnmacc_fv_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement <1 x float> poison, float %c, i32 0 @@ -807,7 +807,7 @@ define <1 x double> @vfwmsac_vv_nxv1f64(<1 x double> %va, <1 x float> %vb, <1 x float> %vc) { ; CHECK-LABEL: vfwmsac_vv_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vfwmsac.vv v8, v9, v10 ; CHECK-NEXT: ret %vd = fpext <1 x float> %vb to <1 x 
double> @@ -820,7 +820,7 @@ define <1 x double> @vfwmsac_vf_nxv1f64(<1 x double> %va, <1 x float> %vb, float %c) { ; CHECK-LABEL: vfwmsac_vf_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vfwmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement <1 x float> poison, float %c, i32 0 @@ -835,7 +835,7 @@ define <1 x double> @vfwnmsac_vv_nxv1f64(<1 x double> %va, <1 x float> %vb, <1 x float> %vc) { ; CHECK-LABEL: vfwnmsac_vv_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vfwnmsac.vv v8, v9, v10 ; CHECK-NEXT: ret %vd = fpext <1 x float> %vb to <1 x double> @@ -848,7 +848,7 @@ define <1 x double> @vfwnmsac_vf_nxv1f64(<1 x double> %va, <1 x float> %vb, float %c) { ; CHECK-LABEL: vfwnmsac_vf_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement <1 x float> poison, float %c, i32 0 @@ -863,7 +863,7 @@ define <1 x double> @vfwnmsac_fv_nxv1f64(<1 x double> %va, <1 x float> %vb, float %c) { ; CHECK-LABEL: vfwnmsac_fv_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement <1 x float> poison, float %c, i32 0 @@ -880,7 +880,7 @@ define <2 x double> @vfwmacc_vv_nxv2f64(<2 x double> %va, <2 x float> %vb, <2 x float> %vc) { ; CHECK-LABEL: vfwmacc_vv_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vfwmacc.vv v8, v9, v10 ; CHECK-NEXT: ret %vd = fpext <2 x float> %vb to <2 x double> @@ -892,7 +892,7 @@ define <2 x double> @vfwmacc_vf_nxv2f64(<2 x double> %va, <2 x float> %vb, float %c) { ; CHECK-LABEL: vfwmacc_vf_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vfwmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement <2 x float> poison, float %c, i32 0 @@ -906,7 +906,7 @@ define <2 x double> @vfwnmacc_vv_nxv2f64(<2 x double> %va, <2 x float> %vb, <2 x float> %vc) { ; CHECK-LABEL: vfwnmacc_vv_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vfwnmacc.vv v8, v9, v10 ; CHECK-NEXT: ret %vd = fpext <2 x float> %vb to <2 x double> @@ -920,7 +920,7 @@ define <2 x double> @vfwnmacc_vf_nxv2f64(<2 x double> %va, <2 x float> %vb, float %c) { ; CHECK-LABEL: vfwnmacc_vf_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement <2 x float> poison, float %c, i32 0 @@ -936,7 +936,7 @@ define <2 x double> @vfwnmacc_fv_nxv2f64(<2 x double> %va, <2 x float> %vb, float %c) { ; CHECK-LABEL: vfwnmacc_fv_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement <2 x float> poison, float %c, i32 0 @@ -952,7 +952,7 @@ define <2 x double> @vfwmsac_vv_nxv2f64(<2 x double> %va, <2 x float> %vb, <2 x float> %vc) { ; CHECK-LABEL: vfwmsac_vv_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli 
zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vfwmsac.vv v8, v9, v10 ; CHECK-NEXT: ret %vd = fpext <2 x float> %vb to <2 x double> @@ -965,7 +965,7 @@ define <2 x double> @vfwmsac_vf_nxv2f64(<2 x double> %va, <2 x float> %vb, float %c) { ; CHECK-LABEL: vfwmsac_vf_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vfwmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement <2 x float> poison, float %c, i32 0 @@ -980,7 +980,7 @@ define <2 x double> @vfwnmsac_vv_nxv2f64(<2 x double> %va, <2 x float> %vb, <2 x float> %vc) { ; CHECK-LABEL: vfwnmsac_vv_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vfwnmsac.vv v8, v9, v10 ; CHECK-NEXT: ret %vd = fpext <2 x float> %vb to <2 x double> @@ -993,7 +993,7 @@ define <2 x double> @vfwnmsac_vf_nxv2f64(<2 x double> %va, <2 x float> %vb, float %c) { ; CHECK-LABEL: vfwnmsac_vf_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement <2 x float> poison, float %c, i32 0 @@ -1008,7 +1008,7 @@ define <2 x double> @vfwnmsac_fv_nxv2f64(<2 x double> %va, <2 x float> %vb, float %c) { ; CHECK-LABEL: vfwnmsac_fv_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement <2 x float> poison, float %c, i32 0 @@ -1026,7 +1026,7 @@ define <4 x double> @vfwmacc_vv_nxv4f64(<4 x double> %va, <4 x float> %vb, <4 x float> %vc) { ; CHECK-LABEL: vfwmacc_vv_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vfwmacc.vv v8, v10, v11 ; CHECK-NEXT: ret %vd = fpext <4 x float> %vb to <4 x double> @@ -1038,7 +1038,7 @@ define <4 x double> @vfwmacc_vf_nxv4f64(<4 x double> %va, <4 x float> %vb, float %c) { ; CHECK-LABEL: vfwmacc_vf_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vfwmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret %head = insertelement <4 x float> poison, float %c, i32 0 @@ -1052,7 +1052,7 @@ define <4 x double> @vfwnmacc_vv_nxv4f64(<4 x double> %va, <4 x float> %vb, <4 x float> %vc) { ; CHECK-LABEL: vfwnmacc_vv_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vfwnmacc.vv v8, v10, v11 ; CHECK-NEXT: ret %vd = fpext <4 x float> %vb to <4 x double> @@ -1066,7 +1066,7 @@ define <4 x double> @vfwnmacc_vf_nxv4f64(<4 x double> %va, <4 x float> %vb, float %c) { ; CHECK-LABEL: vfwnmacc_vf_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret %head = insertelement <4 x float> poison, float %c, i32 0 @@ -1082,7 +1082,7 @@ define <4 x double> @vfwnmacc_fv_nxv4f64(<4 x double> %va, <4 x float> %vb, float %c) { ; CHECK-LABEL: vfwnmacc_fv_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret %head = insertelement <4 x float> poison, float %c, i32 0 @@ -1098,7 +1098,7 
@@ define <4 x double> @vfwmsac_vv_nxv4f64(<4 x double> %va, <4 x float> %vb, <4 x float> %vc) { ; CHECK-LABEL: vfwmsac_vv_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vfwmsac.vv v8, v10, v11 ; CHECK-NEXT: ret %vd = fpext <4 x float> %vb to <4 x double> @@ -1111,7 +1111,7 @@ define <4 x double> @vfwmsac_vf_nxv4f64(<4 x double> %va, <4 x float> %vb, float %c) { ; CHECK-LABEL: vfwmsac_vf_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vfwmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret %head = insertelement <4 x float> poison, float %c, i32 0 @@ -1126,7 +1126,7 @@ define <4 x double> @vfwnmsac_vv_nxv4f64(<4 x double> %va, <4 x float> %vb, <4 x float> %vc) { ; CHECK-LABEL: vfwnmsac_vv_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vfwnmsac.vv v8, v10, v11 ; CHECK-NEXT: ret %vd = fpext <4 x float> %vb to <4 x double> @@ -1139,7 +1139,7 @@ define <4 x double> @vfwnmsac_vf_nxv4f64(<4 x double> %va, <4 x float> %vb, float %c) { ; CHECK-LABEL: vfwnmsac_vf_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret %head = insertelement <4 x float> poison, float %c, i32 0 @@ -1154,7 +1154,7 @@ define <4 x double> @vfwnmsac_fv_nxv4f64(<4 x double> %va, <4 x float> %vb, float %c) { ; CHECK-LABEL: vfwnmsac_fv_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret %head = insertelement <4 x float> poison, float %c, i32 0 @@ -1171,7 +1171,7 @@ define <8 x double> @vfwmacc_vv_nxv8f64(<8 x double> %va, <8 x float> %vb, <8 x float> %vc) { ; CHECK-LABEL: vfwmacc_vv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vfwmacc.vv v8, v12, v14 ; CHECK-NEXT: ret %vd = fpext <8 x float> %vb to <8 x double> @@ -1183,7 +1183,7 @@ define <8 x double> @vfwmacc_vf_nxv8f64(<8 x double> %va, <8 x float> %vb, float %c) { ; CHECK-LABEL: vfwmacc_vf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vfwmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret %head = insertelement <8 x float> poison, float %c, i32 0 @@ -1197,7 +1197,7 @@ define <8 x double> @vfwnmacc_vv_nxv8f64(<8 x double> %va, <8 x float> %vb, <8 x float> %vc) { ; CHECK-LABEL: vfwnmacc_vv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vfwnmacc.vv v8, v12, v14 ; CHECK-NEXT: ret %vd = fpext <8 x float> %vb to <8 x double> @@ -1211,7 +1211,7 @@ define <8 x double> @vfwnmacc_vf_nxv8f64(<8 x double> %va, <8 x float> %vb, float %c) { ; CHECK-LABEL: vfwnmacc_vf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret %head = insertelement <8 x float> poison, float %c, i32 0 @@ -1227,7 +1227,7 @@ define <8 x double> @vfwnmacc_fv_nxv8f64(<8 x double> %va, <8 x float> %vb, float %c) { ; CHECK-LABEL: vfwnmacc_fv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; 
CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret %head = insertelement <8 x float> poison, float %c, i32 0 @@ -1243,7 +1243,7 @@ define <8 x double> @vfwmsac_vv_nxv8f64(<8 x double> %va, <8 x float> %vb, <8 x float> %vc) { ; CHECK-LABEL: vfwmsac_vv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vfwmsac.vv v8, v12, v14 ; CHECK-NEXT: ret %vd = fpext <8 x float> %vb to <8 x double> @@ -1256,7 +1256,7 @@ define <8 x double> @vfwmsac_vf_nxv8f64(<8 x double> %va, <8 x float> %vb, float %c) { ; CHECK-LABEL: vfwmsac_vf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vfwmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret %head = insertelement <8 x float> poison, float %c, i32 0 @@ -1271,7 +1271,7 @@ define <8 x double> @vfwnmsac_vv_nxv8f64(<8 x double> %va, <8 x float> %vb, <8 x float> %vc) { ; CHECK-LABEL: vfwnmsac_vv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vfwnmsac.vv v8, v12, v14 ; CHECK-NEXT: ret %vd = fpext <8 x float> %vb to <8 x double> @@ -1284,7 +1284,7 @@ define <8 x double> @vfwnmsac_vf_nxv8f64(<8 x double> %va, <8 x float> %vb, float %c) { ; CHECK-LABEL: vfwnmsac_vf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret %head = insertelement <8 x float> poison, float %c, i32 0 @@ -1299,7 +1299,7 @@ define <8 x double> @vfwnmsac_fv_nxv8f64(<8 x double> %va, <8 x float> %vb, float %c) { ; CHECK-LABEL: vfwnmsac_fv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret %head = insertelement <8 x float> poison, float %c, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll @@ -7,7 +7,7 @@ define <2 x float> @vfwmul_v2f16(<2 x half> *%x, <2 x half> *%y) { ; CHECK-LABEL: vfwmul_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vle16.v v10, (a1) ; CHECK-NEXT: vfwmul.vv v8, v9, v10 @@ -23,7 +23,7 @@ define <4 x float> @vfwmul_v4f16(<4 x half> *%x, <4 x half> *%y) { ; CHECK-LABEL: vfwmul_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vle16.v v10, (a1) ; CHECK-NEXT: vfwmul.vv v8, v9, v10 @@ -39,7 +39,7 @@ define <8 x float> @vfwmul_v8f16(<8 x half> *%x, <8 x half> *%y) { ; CHECK-LABEL: vfwmul_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v10, (a0) ; CHECK-NEXT: vle16.v v11, (a1) ; CHECK-NEXT: vfwmul.vv v8, v10, v11 @@ -55,7 +55,7 @@ define <16 x float> @vfwmul_v16f16(<16 x half> *%x, <16 x half> *%y) { ; CHECK-LABEL: vfwmul_v16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v12, (a0) ; CHECK-NEXT: vle16.v v14, (a1) ; 
CHECK-NEXT: vfwmul.vv v8, v12, v14 @@ -72,7 +72,7 @@ ; CHECK-LABEL: vfwmul_v32f16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vle16.v v20, (a1) ; CHECK-NEXT: vfwmul.vv v8, v16, v20 @@ -94,16 +94,16 @@ ; CHECK-NEXT: slli a2, a2, 3 ; CHECK-NEXT: sub sp, sp, a2 ; CHECK-NEXT: li a2, 64 -; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vle16.v v24, (a1) ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v16, a0 ; CHECK-NEXT: addi a1, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: vslidedown.vx v0, v24, a0 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfwmul.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload @@ -124,7 +124,7 @@ define <2 x double> @vfwmul_v2f32(<2 x float> *%x, <2 x float> *%y) { ; CHECK-LABEL: vfwmul_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vle32.v v10, (a1) ; CHECK-NEXT: vfwmul.vv v8, v9, v10 @@ -140,7 +140,7 @@ define <4 x double> @vfwmul_v4f32(<4 x float> *%x, <4 x float> *%y) { ; CHECK-LABEL: vfwmul_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v10, (a0) ; CHECK-NEXT: vle32.v v11, (a1) ; CHECK-NEXT: vfwmul.vv v8, v10, v11 @@ -156,7 +156,7 @@ define <8 x double> @vfwmul_v8f32(<8 x float> *%x, <8 x float> *%y) { ; CHECK-LABEL: vfwmul_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v12, (a0) ; CHECK-NEXT: vle32.v v14, (a1) ; CHECK-NEXT: vfwmul.vv v8, v12, v14 @@ -172,7 +172,7 @@ define <16 x double> @vfwmul_v16f32(<16 x float> *%x, <16 x float> *%y) { ; CHECK-LABEL: vfwmul_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vle32.v v20, (a1) ; CHECK-NEXT: vfwmul.vv v8, v16, v20 @@ -194,15 +194,15 @@ ; CHECK-NEXT: slli a2, a2, 3 ; CHECK-NEXT: sub sp, sp, a2 ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vle32.v v24, (a1) -; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v16, 16 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill ; CHECK-NEXT: vslidedown.vi v0, v24, 16 -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vfwmul.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload @@ -223,7 +223,7 @@ define <2 x float> @vfwmul_vf_v2f16(<2 x half>* %x, half %y) { ; CHECK-LABEL: vfwmul_vf_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vfwmul.vf v8, v9, fa0 ; CHECK-NEXT: ret @@ -239,7 +239,7 
@@ define <4 x float> @vfwmul_vf_v4f16(<4 x half>* %x, half %y) { ; CHECK-LABEL: vfwmul_vf_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vfwmul.vf v8, v9, fa0 ; CHECK-NEXT: ret @@ -255,7 +255,7 @@ define <8 x float> @vfwmul_vf_v8f16(<8 x half>* %x, half %y) { ; CHECK-LABEL: vfwmul_vf_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v10, (a0) ; CHECK-NEXT: vfwmul.vf v8, v10, fa0 ; CHECK-NEXT: ret @@ -271,7 +271,7 @@ define <16 x float> @vfwmul_vf_v16f16(<16 x half>* %x, half %y) { ; CHECK-LABEL: vfwmul_vf_v16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v12, (a0) ; CHECK-NEXT: vfwmul.vf v8, v12, fa0 ; CHECK-NEXT: ret @@ -288,7 +288,7 @@ ; CHECK-LABEL: vfwmul_vf_v32f16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vfwmul.vf v8, v16, fa0 ; CHECK-NEXT: ret @@ -304,7 +304,7 @@ define <2 x double> @vfwmul_vf_v2f32(<2 x float>* %x, float %y) { ; CHECK-LABEL: vfwmul_vf_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vfwmul.vf v8, v9, fa0 ; CHECK-NEXT: ret @@ -320,7 +320,7 @@ define <4 x double> @vfwmul_vf_v4f32(<4 x float>* %x, float %y) { ; CHECK-LABEL: vfwmul_vf_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v10, (a0) ; CHECK-NEXT: vfwmul.vf v8, v10, fa0 ; CHECK-NEXT: ret @@ -336,7 +336,7 @@ define <8 x double> @vfwmul_vf_v8f32(<8 x float>* %x, float %y) { ; CHECK-LABEL: vfwmul_vf_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v12, (a0) ; CHECK-NEXT: vfwmul.vf v8, v12, fa0 ; CHECK-NEXT: ret @@ -352,7 +352,7 @@ define <16 x double> @vfwmul_vf_v16f32(<16 x float>* %x, float %y) { ; CHECK-LABEL: vfwmul_vf_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vfwmul.vf v8, v16, fa0 ; CHECK-NEXT: ret @@ -369,16 +369,16 @@ ; CHECK-LABEL: vfwmul_vf_v32f32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v16, v8, 16 -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v24, v16 ; CHECK-NEXT: vfwcvt.f.f.v v16, v8 ; CHECK-NEXT: vfmv.v.f v8, fa0 ; CHECK-NEXT: vfwcvt.f.f.v v0, v8 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; CHECK-NEXT: vfmul.vv v8, v16, v0 ; CHECK-NEXT: vfmul.vv v16, v24, v0 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll @@ -7,7 +7,7 
@@ define <2 x float> @vfwsub_v2f16(<2 x half> *%x, <2 x half> *%y) { ; CHECK-LABEL: vfwsub_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vle16.v v10, (a1) ; CHECK-NEXT: vfwsub.vv v8, v9, v10 @@ -23,7 +23,7 @@ define <4 x float> @vfwsub_v4f16(<4 x half> *%x, <4 x half> *%y) { ; CHECK-LABEL: vfwsub_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vle16.v v10, (a1) ; CHECK-NEXT: vfwsub.vv v8, v9, v10 @@ -39,7 +39,7 @@ define <8 x float> @vfwsub_v8f16(<8 x half> *%x, <8 x half> *%y) { ; CHECK-LABEL: vfwsub_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v10, (a0) ; CHECK-NEXT: vle16.v v11, (a1) ; CHECK-NEXT: vfwsub.vv v8, v10, v11 @@ -55,7 +55,7 @@ define <16 x float> @vfwsub_v16f16(<16 x half> *%x, <16 x half> *%y) { ; CHECK-LABEL: vfwsub_v16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v12, (a0) ; CHECK-NEXT: vle16.v v14, (a1) ; CHECK-NEXT: vfwsub.vv v8, v12, v14 @@ -72,7 +72,7 @@ ; CHECK-LABEL: vfwsub_v32f16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vle16.v v20, (a1) ; CHECK-NEXT: vfwsub.vv v8, v16, v20 @@ -94,16 +94,16 @@ ; CHECK-NEXT: slli a2, a2, 3 ; CHECK-NEXT: sub sp, sp, a2 ; CHECK-NEXT: li a2, 64 -; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vle16.v v24, (a1) ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v16, a0 ; CHECK-NEXT: addi a1, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: vslidedown.vx v0, v24, a0 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfwsub.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload @@ -124,7 +124,7 @@ define <2 x double> @vfwsub_v2f32(<2 x float> *%x, <2 x float> *%y) { ; CHECK-LABEL: vfwsub_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vle32.v v10, (a1) ; CHECK-NEXT: vfwsub.vv v8, v9, v10 @@ -140,7 +140,7 @@ define <4 x double> @vfwsub_v4f32(<4 x float> *%x, <4 x float> *%y) { ; CHECK-LABEL: vfwsub_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v10, (a0) ; CHECK-NEXT: vle32.v v11, (a1) ; CHECK-NEXT: vfwsub.vv v8, v10, v11 @@ -156,7 +156,7 @@ define <8 x double> @vfwsub_v8f32(<8 x float> *%x, <8 x float> *%y) { ; CHECK-LABEL: vfwsub_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v12, (a0) ; CHECK-NEXT: vle32.v v14, (a1) ; CHECK-NEXT: vfwsub.vv v8, v12, v14 @@ -172,7 +172,7 @@ define <16 x double> @vfwsub_v16f32(<16 x float> *%x, <16 x float> *%y) { ; CHECK-LABEL: vfwsub_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: 
vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vle32.v v20, (a1) ; CHECK-NEXT: vfwsub.vv v8, v16, v20 @@ -194,15 +194,15 @@ ; CHECK-NEXT: slli a2, a2, 3 ; CHECK-NEXT: sub sp, sp, a2 ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vle32.v v24, (a1) -; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v16, 16 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill ; CHECK-NEXT: vslidedown.vi v0, v24, 16 -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vfwsub.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload @@ -223,7 +223,7 @@ define <2 x float> @vfwsub_vf_v2f16(<2 x half>* %x, half %y) { ; CHECK-LABEL: vfwsub_vf_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vfwsub.vf v8, v9, fa0 ; CHECK-NEXT: ret @@ -239,7 +239,7 @@ define <4 x float> @vfwsub_vf_v4f16(<4 x half>* %x, half %y) { ; CHECK-LABEL: vfwsub_vf_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vfwsub.vf v8, v9, fa0 ; CHECK-NEXT: ret @@ -255,7 +255,7 @@ define <8 x float> @vfwsub_vf_v8f16(<8 x half>* %x, half %y) { ; CHECK-LABEL: vfwsub_vf_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v10, (a0) ; CHECK-NEXT: vfwsub.vf v8, v10, fa0 ; CHECK-NEXT: ret @@ -271,7 +271,7 @@ define <16 x float> @vfwsub_vf_v16f16(<16 x half>* %x, half %y) { ; CHECK-LABEL: vfwsub_vf_v16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v12, (a0) ; CHECK-NEXT: vfwsub.vf v8, v12, fa0 ; CHECK-NEXT: ret @@ -288,7 +288,7 @@ ; CHECK-LABEL: vfwsub_vf_v32f16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vfwsub.vf v8, v16, fa0 ; CHECK-NEXT: ret @@ -304,7 +304,7 @@ define <2 x double> @vfwsub_vf_v2f32(<2 x float>* %x, float %y) { ; CHECK-LABEL: vfwsub_vf_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vfwsub.vf v8, v9, fa0 ; CHECK-NEXT: ret @@ -320,7 +320,7 @@ define <4 x double> @vfwsub_vf_v4f32(<4 x float>* %x, float %y) { ; CHECK-LABEL: vfwsub_vf_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v10, (a0) ; CHECK-NEXT: vfwsub.vf v8, v10, fa0 ; CHECK-NEXT: ret @@ -336,7 +336,7 @@ define <8 x double> @vfwsub_vf_v8f32(<8 x float>* %x, float %y) { ; CHECK-LABEL: vfwsub_vf_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v12, (a0) ; CHECK-NEXT: vfwsub.vf v8, v12, fa0 ; CHECK-NEXT: ret @@ -352,7 +352,7 @@ define <16 x double> @vfwsub_vf_v16f32(<16 x float>* %x, float 
%y) { ; CHECK-LABEL: vfwsub_vf_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vfwsub.vf v8, v16, fa0 ; CHECK-NEXT: ret @@ -369,16 +369,16 @@ ; CHECK-LABEL: vfwsub_vf_v32f32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v16, v8, 16 -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v24, v16 ; CHECK-NEXT: vfwcvt.f.f.v v16, v8 ; CHECK-NEXT: vfmv.v.f v8, fa0 ; CHECK-NEXT: vfwcvt.f.f.v v0, v8 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; CHECK-NEXT: vfsub.vv v8, v16, v0 ; CHECK-NEXT: vfsub.vv v16, v24, v0 ; CHECK-NEXT: ret @@ -394,7 +394,7 @@ define <2 x float> @vfwsub_wv_v2f16(<2 x float> *%x, <2 x half> *%y) { ; CHECK-LABEL: vfwsub_wv_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vfwsub.wv v8, v8, v9 @@ -409,7 +409,7 @@ define <4 x float> @vfwsub_wv_v4f16(<4 x float> *%x, <4 x half> *%y) { ; CHECK-LABEL: vfwsub_wv_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vfwsub.wv v8, v8, v9 @@ -424,7 +424,7 @@ define <8 x float> @vfwsub_wv_v8f16(<8 x float> *%x, <8 x half> *%y) { ; CHECK-LABEL: vfwsub_wv_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle16.v v10, (a1) ; CHECK-NEXT: vfwsub.wv v8, v8, v10 @@ -439,7 +439,7 @@ define <16 x float> @vfwsub_wv_v16f16(<16 x float> *%x, <16 x half> *%y) { ; CHECK-LABEL: vfwsub_wv_v16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle16.v v12, (a1) ; CHECK-NEXT: vfwsub.wv v8, v8, v12 @@ -455,7 +455,7 @@ ; CHECK-LABEL: vfwsub_wv_v32f16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle16.v v16, (a1) ; CHECK-NEXT: vfwsub.wv v8, v8, v16 @@ -470,7 +470,7 @@ define <2 x double> @vfwsub_wv_v2f32(<2 x double> *%x, <2 x float> *%y) { ; CHECK-LABEL: vfwsub_wv_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vfwsub.wv v8, v8, v9 @@ -485,7 +485,7 @@ define <4 x double> @vfwsub_wv_v4f32(<4 x double> *%x, <4 x float> *%y) { ; CHECK-LABEL: vfwsub_wv_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle32.v v10, (a1) ; CHECK-NEXT: vfwsub.wv v8, v8, v10 @@ -500,7 +500,7 @@ define <8 x double> @vfwsub_wv_v8f32(<8 x double> *%x, <8 x float> *%y) { ; CHECK-LABEL: vfwsub_wv_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, 
m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle32.v v12, (a1) ; CHECK-NEXT: vfwsub.wv v8, v8, v12 @@ -515,7 +515,7 @@ define <16 x double> @vfwsub_wv_v16f32(<16 x double> *%x, <16 x float> *%y) { ; CHECK-LABEL: vfwsub_wv_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle32.v v16, (a1) ; CHECK-NEXT: vfwsub.wv v8, v8, v16 @@ -530,7 +530,7 @@ define <2 x float> @vfwsub_wf_v2f16(<2 x float>* %x, half %y) { ; CHECK-LABEL: vfwsub_wf_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfwsub.wf v8, v8, fa0 ; CHECK-NEXT: ret @@ -545,7 +545,7 @@ define <4 x float> @vfwsub_wf_v4f16(<4 x float>* %x, half %y) { ; CHECK-LABEL: vfwsub_wf_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfwsub.wf v8, v8, fa0 ; CHECK-NEXT: ret @@ -560,7 +560,7 @@ define <8 x float> @vfwsub_wf_v8f16(<8 x float>* %x, half %y) { ; CHECK-LABEL: vfwsub_wf_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfwsub.wf v8, v8, fa0 ; CHECK-NEXT: ret @@ -575,7 +575,7 @@ define <16 x float> @vfwsub_wf_v16f16(<16 x float>* %x, half %y) { ; CHECK-LABEL: vfwsub_wf_v16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vfwsub.wf v8, v8, fa0 ; CHECK-NEXT: ret @@ -590,7 +590,7 @@ define <2 x double> @vfwsub_wf_v2f32(<2 x double>* %x, float %y) { ; CHECK-LABEL: vfwsub_wf_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfwsub.wf v8, v8, fa0 ; CHECK-NEXT: ret @@ -605,7 +605,7 @@ define <4 x double> @vfwsub_wf_v4f32(<4 x double>* %x, float %y) { ; CHECK-LABEL: vfwsub_wf_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfwsub.wf v8, v8, fa0 ; CHECK-NEXT: ret @@ -620,7 +620,7 @@ define <8 x double> @vfwsub_wf_v8f32(<8 x double>* %x, float %y) { ; CHECK-LABEL: vfwsub_wf_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfwsub.wf v8, v8, fa0 ; CHECK-NEXT: ret @@ -635,7 +635,7 @@ define <16 x double> @vfwsub_wf_v16f32(<16 x double>* %x, float %y) { ; CHECK-LABEL: vfwsub_wf_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vfwsub.wf v8, v8, fa0 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp-mask.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp-mask.ll @@ -9,7 +9,7 @@ define <2 x i1> @vmul_vv_v2i1(<2 x i1> %va, <2 x i1> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: 
vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmand.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <2 x i1> @llvm.vp.mul.v2i1(<2 x i1> %va, <2 x i1> %b, <2 x i1> %m, i32 %evl) @@ -21,7 +21,7 @@ define <4 x i1> @vmul_vv_v4i1(<4 x i1> %va, <4 x i1> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmand.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <4 x i1> @llvm.vp.mul.v4i1(<4 x i1> %va, <4 x i1> %b, <4 x i1> %m, i32 %evl) @@ -33,7 +33,7 @@ define <8 x i1> @vmul_vv_v8i1(<8 x i1> %va, <8 x i1> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmand.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <8 x i1> @llvm.vp.mul.v8i1(<8 x i1> %va, <8 x i1> %b, <8 x i1> %m, i32 %evl) @@ -45,7 +45,7 @@ define <16 x i1> @vmul_vv_v16i1(<16 x i1> %va, <16 x i1> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmand.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <16 x i1> @llvm.vp.mul.v16i1(<16 x i1> %va, <16 x i1> %b, <16 x i1> %m, i32 %evl) @@ -57,7 +57,7 @@ define <32 x i1> @vmul_vv_v32i1(<32 x i1> %va, <32 x i1> %b, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmand.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <32 x i1> @llvm.vp.mul.v32i1(<32 x i1> %va, <32 x i1> %b, <32 x i1> %m, i32 %evl) @@ -69,7 +69,7 @@ define <64 x i1> @vmul_vv_v64i1(<64 x i1> %va, <64 x i1> %b, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmand.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <64 x i1> @llvm.vp.mul.v64i1(<64 x i1> %va, <64 x i1> %b, <64 x i1> %m, i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll @@ -31,7 +31,7 @@ define <2 x i8> @vmul_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -55,7 +55,7 @@ define <2 x i8> @vmul_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_v2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0 @@ -81,7 +81,7 @@ define <4 x i8> @vmul_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -105,7 +105,7 @@ define <4 x i8> @vmul_vx_v4i8_unmasked(<4 x i8> 
%va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0 @@ -131,7 +131,7 @@ define <8 x i8> @vmul_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -155,7 +155,7 @@ define <8 x i8> @vmul_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_v8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 @@ -181,7 +181,7 @@ define <16 x i8> @vmul_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -205,7 +205,7 @@ define <16 x i8> @vmul_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_v16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0 @@ -231,7 +231,7 @@ define <2 x i16> @vmul_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -255,7 +255,7 @@ define <2 x i16> @vmul_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_v2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0 @@ -281,7 +281,7 @@ define <4 x i16> @vmul_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -305,7 +305,7 @@ define <4 x i16> @vmul_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0 @@ -331,7 +331,7 @@ define <8 x i16> @vmul_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret 
%head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -367,7 +367,7 @@ define <8 x i16> @vmul_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_v8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0 @@ -393,7 +393,7 @@ define <12 x i16> @vmul_vv_v12i16_unmasked(<12 x i16> %va, <12 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v12i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <12 x i1> poison, i1 true, i32 0 @@ -417,7 +417,7 @@ define <12 x i16> @vmul_vx_v12i16_unmasked(<12 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_v12i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <12 x i16> poison, i16 %b, i32 0 @@ -443,7 +443,7 @@ define <16 x i16> @vmul_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -467,7 +467,7 @@ define <16 x i16> @vmul_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_v16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0 @@ -493,7 +493,7 @@ define <2 x i32> @vmul_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -517,7 +517,7 @@ define <2 x i32> @vmul_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_v2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0 @@ -543,7 +543,7 @@ define <4 x i32> @vmul_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -567,7 +567,7 @@ define <4 x i32> @vmul_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0 @@ -593,7 +593,7 @@ define <8 x i32> @vmul_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v8i32_unmasked: ; 
CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -617,7 +617,7 @@ define <8 x i32> @vmul_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_v8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 @@ -643,7 +643,7 @@ define <16 x i32> @vmul_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -667,7 +667,7 @@ define <16 x i32> @vmul_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_v16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0 @@ -693,7 +693,7 @@ define <2 x i64> @vmul_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -710,7 +710,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v9, v0.t @@ -736,16 +736,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmul.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vmul_vx_v2i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0 @@ -771,7 +771,7 @@ define <4 x i64> @vmul_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -788,7 +788,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v10, v0.t @@ -814,16 +814,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: 
vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vmul.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vmul_vx_v4i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0 @@ -849,7 +849,7 @@ define <8 x i64> @vmul_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -866,7 +866,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v12, v0.t @@ -892,16 +892,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmul.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vmul_vx_v8i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 @@ -927,7 +927,7 @@ define <16 x i64> @vmul_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_v16i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -944,7 +944,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v16, v0.t @@ -970,16 +970,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmul.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vmul_vx_v16i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vnsra-vnsrl.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vnsra-vnsrl.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vnsra-vnsrl.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vnsra-vnsrl.ll @@ -5,7 +5,7 @@ define <8 x i8> 
@vnsra_v8i16_v8i8_scalar(<8 x i16> %x, i16 %y) { ; CHECK-LABEL: vnsra_v8i16_v8i8_scalar: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vnsra.wx v8, v8, a0 ; CHECK-NEXT: ret %insert = insertelement <8 x i16> poison, i16 %y, i16 0 @@ -18,7 +18,7 @@ define <8 x i8> @vnsra_v8i16_v8i8_scalar_sext(<8 x i16> %x, i8 %y) { ; CHECK-LABEL: vnsra_v8i16_v8i8_scalar_sext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vnsra.wx v8, v8, a0 ; CHECK-NEXT: ret %insert = insertelement <8 x i8> poison, i8 %y, i8 0 @@ -32,7 +32,7 @@ define <8 x i8> @vnsra_v8i16_v8i8_scalar_zext(<8 x i16> %x, i8 %y) { ; CHECK-LABEL: vnsra_v8i16_v8i8_scalar_zext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vnsra.wx v8, v8, a0 ; CHECK-NEXT: ret %insert = insertelement <8 x i8> poison, i8 %y, i8 0 @@ -46,7 +46,7 @@ define <4 x i16> @vnsra_v4i32_v4i16_scalar(<4 x i32> %x, i32 %y) { ; CHECK-LABEL: vnsra_v4i32_v4i16_scalar: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vnsra.wx v8, v8, a0 ; CHECK-NEXT: ret %insert = insertelement <4 x i32> poison, i32 %y, i32 0 @@ -59,7 +59,7 @@ define <4 x i16> @vnsra_v4i32_v4i16_scalar_sext(<4 x i32> %x, i16 %y) { ; CHECK-LABEL: vnsra_v4i32_v4i16_scalar_sext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vnsra.wx v8, v8, a0 ; CHECK-NEXT: ret %insert = insertelement <4 x i16> poison, i16 %y, i16 0 @@ -73,7 +73,7 @@ define <4 x i16> @vnsra_v4i32_v4i16_scalar_zext(<4 x i32> %x, i16 %y) { ; CHECK-LABEL: vnsra_v4i32_v4i16_scalar_zext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vnsra.wx v8, v8, a0 ; CHECK-NEXT: ret %insert = insertelement <4 x i16> poison, i16 %y, i16 0 @@ -87,7 +87,7 @@ define <2 x i32> @vnsra_v2i64_v2i32_scalar(<2 x i64> %x, i64 %y) { ; CHECK-LABEL: vnsra_v2i64_v2i32_scalar: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vnsra.wx v8, v8, a0 ; CHECK-NEXT: ret %insert = insertelement <2 x i64> poison, i64 %y, i32 0 @@ -100,7 +100,7 @@ define <2 x i32> @vnsra_v2i64_v2i32_scalar_sext(<2 x i64> %x, i32 %y) { ; CHECK-LABEL: vnsra_v2i64_v2i32_scalar_sext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vnsra.wx v8, v8, a0 ; CHECK-NEXT: ret %insert = insertelement <2 x i32> poison, i32 %y, i32 0 @@ -114,7 +114,7 @@ define <2 x i32> @vnsra_v2i64_v2i32_scalar_zext(<2 x i64> %x, i32 %y) { ; CHECK-LABEL: vnsra_v2i64_v2i32_scalar_zext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vnsra.wx v8, v8, a0 ; CHECK-NEXT: ret %insert = insertelement <2 x i32> poison, i32 %y, i32 0 @@ -128,7 +128,7 @@ define <8 x i8> @vnsra_v8i16_v8i8_imm(<8 x i16> %x) { ; CHECK-LABEL: vnsra_v8i16_v8i8_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 8 ; CHECK-NEXT: ret %a = ashr <8 x i16> %x, @@ -139,7 +139,7 @@ define <4 x i16> @vnsra_v4i32_v4i16_imm(<4 x i32> %x) { ; 
CHECK-LABEL: vnsra_v4i32_v4i16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 16 ; CHECK-NEXT: ret %a = ashr <4 x i32> %x, @@ -150,7 +150,7 @@ define <2 x i32> @vnsra_v2i64_v2i32_imm(<2 x i64> %x) { ; CHECK-LABEL: vnsra_v2i64_v2i32_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 31 ; CHECK-NEXT: ret %a = ashr <2 x i64> %x, @@ -161,7 +161,7 @@ define <8 x i8> @vnsrl_v8i16_v8i8_scalar(<8 x i16> %x, i16 %y) { ; CHECK-LABEL: vnsrl_v8i16_v8i8_scalar: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vnsrl.wx v8, v8, a0 ; CHECK-NEXT: ret %insert = insertelement <8 x i16> poison, i16 %y, i16 0 @@ -174,7 +174,7 @@ define <8 x i8> @vnsrl_v8i16_v8i8_scalar_sext(<8 x i16> %x, i8 %y) { ; CHECK-LABEL: vnsrl_v8i16_v8i8_scalar_sext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vnsrl.wx v8, v8, a0 ; CHECK-NEXT: ret %insert = insertelement <8 x i8> poison, i8 %y, i16 0 @@ -188,7 +188,7 @@ define <8 x i8> @vnsrl_v8i16_v8i8_scalar_zext(<8 x i16> %x, i8 %y) { ; CHECK-LABEL: vnsrl_v8i16_v8i8_scalar_zext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vnsrl.wx v8, v8, a0 ; CHECK-NEXT: ret %insert = insertelement <8 x i8> poison, i8 %y, i16 0 @@ -202,7 +202,7 @@ define <4 x i16> @vnsrl_v4i32_v4i16_scalar(<4 x i32> %x, i32 %y) { ; CHECK-LABEL: vnsrl_v4i32_v4i16_scalar: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wx v8, v8, a0 ; CHECK-NEXT: ret %insert = insertelement <4 x i32> poison, i32 %y, i32 0 @@ -215,7 +215,7 @@ define <4 x i16> @vnsrl_v4i32_v4i16_scalar_sext(<4 x i32> %x, i16 %y) { ; CHECK-LABEL: vnsrl_v4i32_v4i16_scalar_sext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wx v8, v8, a0 ; CHECK-NEXT: ret %insert = insertelement <4 x i16> poison, i16 %y, i16 0 @@ -229,7 +229,7 @@ define <4 x i16> @vnsrl_v4i32_v4i16_scalar_zext(<4 x i32> %x, i16 %y) { ; CHECK-LABEL: vnsrl_v4i32_v4i16_scalar_zext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wx v8, v8, a0 ; CHECK-NEXT: ret %insert = insertelement <4 x i16> poison, i16 %y, i16 0 @@ -243,7 +243,7 @@ define <2 x i32> @vnsrl_v2i64_v2i32_scalar(<2 x i64> %x, i64 %y) { ; CHECK-LABEL: vnsrl_v2i64_v2i32_scalar: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vnsrl.wx v8, v8, a0 ; CHECK-NEXT: ret %insert = insertelement <2 x i64> poison, i64 %y, i32 0 @@ -256,7 +256,7 @@ define <2 x i32> @vnsrl_v2i64_v2i32_scalar_sext(<2 x i64> %x, i32 %y) { ; CHECK-LABEL: vnsrl_v2i64_v2i32_scalar_sext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vnsrl.wx v8, v8, a0 ; CHECK-NEXT: ret %insert = insertelement <2 x i32> poison, i32 %y, i32 0 @@ -270,7 +270,7 @@ define <2 x i32> @vnsrl_v2i64_v2i32_scalar_zext(<2 x i64> %x, i32 %y) { ; CHECK-LABEL: vnsrl_v2i64_v2i32_scalar_zext: ; CHECK: # %bb.0: 
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vnsrl.wx v8, v8, a0 ; CHECK-NEXT: ret %insert = insertelement <2 x i32> poison, i32 %y, i32 0 @@ -284,7 +284,7 @@ define <8 x i8> @vnsrl_v8i16_v8i8_imm(<8 x i16> %x) { ; CHECK-LABEL: vnsrl_v8i16_v8i8_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 8 ; CHECK-NEXT: ret %a = lshr <8 x i16> %x, @@ -295,7 +295,7 @@ define <4 x i16> @vnsrl_v4i32_v4i16_imm(<4 x i32> %x) { ; CHECK-LABEL: vnsrl_v4i32_v4i16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 16 ; CHECK-NEXT: ret %a = lshr <4 x i32> %x, @@ -306,7 +306,7 @@ define <2 x i32> @vnsrl_v2i64_v2i32_imm(<2 x i64> %x) { ; CHECK-LABEL: vnsrl_v2i64_v2i32_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 31 ; CHECK-NEXT: ret %a = lshr <2 x i64> %x, diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll @@ -31,7 +31,7 @@ define <2 x i8> @vor_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -55,7 +55,7 @@ define <2 x i8> @vor_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_v2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0 @@ -81,7 +81,7 @@ define <2 x i8> @vor_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_v2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 5, i32 0 @@ -107,7 +107,7 @@ define <4 x i8> @vor_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -143,7 +143,7 @@ define <4 x i8> @vor_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0 @@ -169,7 +169,7 @@ define <4 x i8> @vor_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 5, i32 0 @@ -195,7 +195,7 @@ define <7 x i8> @vor_vv_v5i8_unmasked(<7 
x i8> %va, <7 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v5i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <7 x i1> poison, i1 true, i32 0 @@ -219,7 +219,7 @@ define <7 x i8> @vor_vx_v5i8_unmasked(<7 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_v5i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <7 x i8> poison, i8 %b, i32 0 @@ -245,7 +245,7 @@ define <7 x i8> @vor_vi_v5i8_unmasked(<7 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_v5i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <7 x i8> poison, i8 5, i32 0 @@ -271,7 +271,7 @@ define <8 x i8> @vor_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -295,7 +295,7 @@ define <8 x i8> @vor_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_v8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 @@ -321,7 +321,7 @@ define <8 x i8> @vor_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_v8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 5, i32 0 @@ -347,7 +347,7 @@ define <16 x i8> @vor_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -371,7 +371,7 @@ define <16 x i8> @vor_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_v16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0 @@ -397,7 +397,7 @@ define <16 x i8> @vor_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_v16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 5, i32 0 @@ -423,7 +423,7 @@ define <2 x i16> @vor_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -447,7 +447,7 @@ define <2 
x i16> @vor_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_v2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0 @@ -473,7 +473,7 @@ define <2 x i16> @vor_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_v2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 5, i32 0 @@ -499,7 +499,7 @@ define <4 x i16> @vor_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -523,7 +523,7 @@ define <4 x i16> @vor_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0 @@ -549,7 +549,7 @@ define <4 x i16> @vor_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 5, i32 0 @@ -575,7 +575,7 @@ define <8 x i16> @vor_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -599,7 +599,7 @@ define <8 x i16> @vor_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_v8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0 @@ -625,7 +625,7 @@ define <8 x i16> @vor_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_v8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 5, i32 0 @@ -651,7 +651,7 @@ define <16 x i16> @vor_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -675,7 +675,7 @@ define <16 x i16> @vor_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_v16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: 
ret %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0 @@ -701,7 +701,7 @@ define <16 x i16> @vor_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_v16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 5, i32 0 @@ -727,7 +727,7 @@ define <2 x i32> @vor_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -751,7 +751,7 @@ define <2 x i32> @vor_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_v2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0 @@ -777,7 +777,7 @@ define <2 x i32> @vor_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_v2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 5, i32 0 @@ -803,7 +803,7 @@ define <4 x i32> @vor_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -827,7 +827,7 @@ define <4 x i32> @vor_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0 @@ -853,7 +853,7 @@ define <4 x i32> @vor_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 5, i32 0 @@ -879,7 +879,7 @@ define <8 x i32> @vor_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -903,7 +903,7 @@ define <8 x i32> @vor_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_v8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 @@ -929,7 +929,7 @@ define <8 x i32> @vor_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_v8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: 
vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 5, i32 0 @@ -955,7 +955,7 @@ define <16 x i32> @vor_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -979,7 +979,7 @@ define <16 x i32> @vor_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_v16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0 @@ -1005,7 +1005,7 @@ define <16 x i32> @vor_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_v16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 5, i32 0 @@ -1031,7 +1031,7 @@ define <2 x i64> @vor_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -1048,7 +1048,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vor.vv v8, v8, v9, v0.t @@ -1074,16 +1074,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vor.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vor_vx_v2i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vor.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0 @@ -1109,7 +1109,7 @@ define <2 x i64> @vor_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_v2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 5, i32 0 @@ -1135,7 +1135,7 @@ define <4 x i64> @vor_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -1152,7 +1152,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vlse64.v 
v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vor.vv v8, v8, v10, v0.t @@ -1178,16 +1178,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vor.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vor_vx_v4i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vor.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0 @@ -1213,7 +1213,7 @@ define <4 x i64> @vor_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_v4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 5, i32 0 @@ -1239,7 +1239,7 @@ define <8 x i64> @vor_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -1256,7 +1256,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vor.vv v8, v8, v12, v0.t @@ -1282,16 +1282,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vor.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vor_vx_v8i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vor.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 @@ -1317,7 +1317,7 @@ define <8 x i64> @vor_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_v8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 5, i32 0 @@ -1343,7 +1343,7 @@ define <16 x i64> @vor_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_v16i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -1360,7 +1360,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vor.vv v8, v8, v16, v0.t @@ 
-1386,16 +1386,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vor.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vor_vx_v16i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vor.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0 @@ -1421,7 +1421,7 @@ define <16 x i64> @vor_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_v16i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 5, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll @@ -29,7 +29,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; RV32-NEXT: vsext.vf2 v8, v9 ; RV32-NEXT: ret ; @@ -37,7 +37,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t -; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; RV64-NEXT: vsext.vf2 v8, v9 ; RV64-NEXT: ret %v = call <2 x i8> @llvm.vp.gather.v2i8.v2p0i8(<2 x i8*> %ptrs, <2 x i1> %m, i32 %evl) @@ -50,7 +50,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; RV32-NEXT: vzext.vf2 v8, v9 ; RV32-NEXT: ret ; @@ -58,7 +58,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t -; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; RV64-NEXT: vzext.vf2 v8, v9 ; RV64-NEXT: ret %v = call <2 x i8> @llvm.vp.gather.v2i8.v2p0i8(<2 x i8*> %ptrs, <2 x i1> %m, i32 %evl) @@ -71,7 +71,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32-NEXT: vsext.vf4 v8, v9 ; RV32-NEXT: ret ; @@ -79,7 +79,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV64-NEXT: vsext.vf4 v8, v9 ; RV64-NEXT: ret %v = call <2 x i8> @llvm.vp.gather.v2i8.v2p0i8(<2 x i8*> %ptrs, <2 x i1> %m, i32 %evl) @@ -92,7 +92,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32-NEXT: vzext.vf4 v8, v9 ; RV32-NEXT: ret ; @@ -100,7 +100,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV64-NEXT: 
vluxei64.v v9, (zero), v8, v0.t -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV64-NEXT: vzext.vf4 v8, v9 ; RV64-NEXT: ret %v = call <2 x i8> @llvm.vp.gather.v2i8.v2p0i8(<2 x i8*> %ptrs, <2 x i1> %m, i32 %evl) @@ -113,7 +113,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vsext.vf8 v8, v9 ; RV32-NEXT: ret ; @@ -121,7 +121,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vsext.vf8 v8, v9 ; RV64-NEXT: ret %v = call <2 x i8> @llvm.vp.gather.v2i8.v2p0i8(<2 x i8*> %ptrs, <2 x i1> %m, i32 %evl) @@ -134,7 +134,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vzext.vf8 v8, v9 ; RV32-NEXT: ret ; @@ -142,7 +142,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vzext.vf8 v8, v9 ; RV64-NEXT: ret %v = call <2 x i8> @llvm.vp.gather.v2i8.v2p0i8(<2 x i8*> %ptrs, <2 x i1> %m, i32 %evl) @@ -173,14 +173,14 @@ define <3 x i8> @vpgather_truemask_v3i8(<3 x i8*> %ptrs, i32 zeroext %evl) { ; RV32-LABEL: vpgather_truemask_v3i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; RV32-NEXT: vluxei32.v v9, (zero), v8 ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_truemask_v3i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; RV64-NEXT: vluxei64.v v10, (zero), v8 ; RV64-NEXT: vmv1r.v v8, v10 ; RV64-NEXT: ret @@ -213,14 +213,14 @@ define <4 x i8> @vpgather_truemask_v4i8(<4 x i8*> %ptrs, i32 zeroext %evl) { ; RV32-LABEL: vpgather_truemask_v4i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; RV32-NEXT: vluxei32.v v9, (zero), v8 ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_truemask_v4i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; RV64-NEXT: vluxei64.v v10, (zero), v8 ; RV64-NEXT: vmv1r.v v8, v10 ; RV64-NEXT: ret @@ -253,7 +253,7 @@ define <8 x i8> @vpgather_baseidx_v8i8(i8* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_v8i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v10, v8 ; RV32-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v10, v0.t @@ -261,7 +261,7 @@ ; ; RV64-LABEL: vpgather_baseidx_v8i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v12, v8 ; RV64-NEXT: vsetvli zero, a1, e8, mf2, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t @@ -277,7 +277,7 @@ ; RV32-LABEL: vpgather_baseidx_v32i8: ; RV32: # %bb.0: ; RV32-NEXT: li a2, 32 -; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma 
; RV32-NEXT: vsext.vf4 v16, v8 ; RV32-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t @@ -292,11 +292,11 @@ ; RV64-NEXT: # %bb.1: ; RV64-NEXT: mv a2, a3 ; RV64-NEXT: .LBB13_2: -; RV64-NEXT: vsetivli zero, 16, e8, m2, ta, mu +; RV64-NEXT: vsetivli zero, 16, e8, m2, ta, ma ; RV64-NEXT: vslidedown.vi v12, v8, 16 -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v12 -; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64-NEXT: vslidedown.vi v0, v10, 2 ; RV64-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t @@ -305,13 +305,13 @@ ; RV64-NEXT: # %bb.3: ; RV64-NEXT: li a1, 16 ; RV64-NEXT: .LBB13_4: -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; RV64-NEXT: vmv1r.v v0, v10 ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: li a0, 32 -; RV64-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; RV64-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; RV64-NEXT: vslideup.vi v8, v12, 16 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i8, i8* %base, <32 x i8> %idxs @@ -344,7 +344,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32-NEXT: vsext.vf2 v8, v9 ; RV32-NEXT: ret ; @@ -352,7 +352,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV64-NEXT: vsext.vf2 v8, v9 ; RV64-NEXT: ret %v = call <2 x i16> @llvm.vp.gather.v2i16.v2p0i16(<2 x i16*> %ptrs, <2 x i1> %m, i32 %evl) @@ -365,7 +365,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32-NEXT: vzext.vf2 v8, v9 ; RV32-NEXT: ret ; @@ -373,7 +373,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV64-NEXT: vzext.vf2 v8, v9 ; RV64-NEXT: ret %v = call <2 x i16> @llvm.vp.gather.v2i16.v2p0i16(<2 x i16*> %ptrs, <2 x i1> %m, i32 %evl) @@ -386,7 +386,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vsext.vf4 v8, v9 ; RV32-NEXT: ret ; @@ -394,7 +394,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vsext.vf4 v8, v9 ; RV64-NEXT: ret %v = call <2 x i16> @llvm.vp.gather.v2i16.v2p0i16(<2 x i16*> %ptrs, <2 x i1> %m, i32 %evl) @@ -407,7 +407,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vzext.vf4 v8, v9 ; RV32-NEXT: ret ; @@ -415,7 +415,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, 
mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vzext.vf4 v8, v9 ; RV64-NEXT: ret %v = call <2 x i16> @llvm.vp.gather.v2i16.v2p0i16(<2 x i16*> %ptrs, <2 x i1> %m, i32 %evl) @@ -446,14 +446,14 @@ define <4 x i16> @vpgather_truemask_v4i16(<4 x i16*> %ptrs, i32 zeroext %evl) { ; RV32-LABEL: vpgather_truemask_v4i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; RV32-NEXT: vluxei32.v v9, (zero), v8 ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_truemask_v4i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; RV64-NEXT: vluxei64.v v10, (zero), v8 ; RV64-NEXT: vmv1r.v v8, v10 ; RV64-NEXT: ret @@ -486,7 +486,7 @@ define <8 x i16> @vpgather_baseidx_v8i8_v8i16(i16* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_v8i8_v8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v10, v8 ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu @@ -495,7 +495,7 @@ ; ; RV64-LABEL: vpgather_baseidx_v8i8_v8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v12, v8 ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu @@ -509,7 +509,7 @@ define <8 x i16> @vpgather_baseidx_sext_v8i8_v8i16(i16* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_sext_v8i8_v8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v10, v8 ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu @@ -518,7 +518,7 @@ ; ; RV64-LABEL: vpgather_baseidx_sext_v8i8_v8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v12, v8 ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu @@ -533,7 +533,7 @@ define <8 x i16> @vpgather_baseidx_zext_v8i8_v8i16(i16* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_zext_v8i8_v8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vzext.vf4 v10, v8 ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu @@ -542,7 +542,7 @@ ; ; RV64-LABEL: vpgather_baseidx_zext_v8i8_v8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vzext.vf8 v12, v8 ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu @@ -557,7 +557,7 @@ define <8 x i16> @vpgather_baseidx_v8i16(i16* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_v8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf2 v10, v8 ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu @@ -566,7 +566,7 @@ ; ; RV64-LABEL: vpgather_baseidx_v8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf4 v12, v8 ; 
RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu @@ -601,7 +601,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vsext.vf2 v8, v9 ; RV32-NEXT: ret ; @@ -609,7 +609,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vsext.vf2 v8, v9 ; RV64-NEXT: ret %v = call <2 x i32> @llvm.vp.gather.v2i32.v2p0i32(<2 x i32*> %ptrs, <2 x i1> %m, i32 %evl) @@ -622,7 +622,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vzext.vf2 v8, v9 ; RV32-NEXT: ret ; @@ -630,7 +630,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vzext.vf2 v8, v9 ; RV64-NEXT: ret %v = call <2 x i32> @llvm.vp.gather.v2i32.v2p0i32(<2 x i32*> %ptrs, <2 x i1> %m, i32 %evl) @@ -660,13 +660,13 @@ define <4 x i32> @vpgather_truemask_v4i32(<4 x i32*> %ptrs, i32 zeroext %evl) { ; RV32-LABEL: vpgather_truemask_v4i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; RV32-NEXT: vluxei32.v v8, (zero), v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_truemask_v4i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; RV64-NEXT: vluxei64.v v10, (zero), v8 ; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret @@ -698,7 +698,7 @@ define <8 x i32> @vpgather_baseidx_v8i8_v8i32(i32* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_v8i8_v8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v10, v8 ; RV32-NEXT: vsll.vi v8, v10, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -707,7 +707,7 @@ ; ; RV64-LABEL: vpgather_baseidx_v8i8_v8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v12, v8 ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -721,7 +721,7 @@ define <8 x i32> @vpgather_baseidx_sext_v8i8_v8i32(i32* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_sext_v8i8_v8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v10, v8 ; RV32-NEXT: vsll.vi v8, v10, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -730,7 +730,7 @@ ; ; RV64-LABEL: vpgather_baseidx_sext_v8i8_v8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v12, v8 ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -745,7 +745,7 @@ define <8 x i32> @vpgather_baseidx_zext_v8i8_v8i32(i32* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_zext_v8i8_v8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli 
zero, 8, e32, m2, ta, ma ; RV32-NEXT: vzext.vf4 v10, v8 ; RV32-NEXT: vsll.vi v8, v10, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -754,7 +754,7 @@ ; ; RV64-LABEL: vpgather_baseidx_zext_v8i8_v8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vzext.vf8 v12, v8 ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -769,7 +769,7 @@ define <8 x i32> @vpgather_baseidx_v8i16_v8i32(i32* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_v8i16_v8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf2 v10, v8 ; RV32-NEXT: vsll.vi v8, v10, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -778,7 +778,7 @@ ; ; RV64-LABEL: vpgather_baseidx_v8i16_v8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf4 v12, v8 ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -792,7 +792,7 @@ define <8 x i32> @vpgather_baseidx_sext_v8i16_v8i32(i32* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_sext_v8i16_v8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf2 v10, v8 ; RV32-NEXT: vsll.vi v8, v10, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -801,7 +801,7 @@ ; ; RV64-LABEL: vpgather_baseidx_sext_v8i16_v8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf4 v12, v8 ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -816,7 +816,7 @@ define <8 x i32> @vpgather_baseidx_zext_v8i16_v8i32(i32* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_zext_v8i16_v8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vzext.vf2 v10, v8 ; RV32-NEXT: vsll.vi v8, v10, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -825,7 +825,7 @@ ; ; RV64-LABEL: vpgather_baseidx_zext_v8i16_v8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vzext.vf4 v12, v8 ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -840,7 +840,7 @@ define <8 x i32> @vpgather_baseidx_v8i32(i32* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_v8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsll.vi v8, v8, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t @@ -848,7 +848,7 @@ ; ; RV64-LABEL: vpgather_baseidx_v8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf2 v12, v8 ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -900,14 +900,14 @@ define <4 x i64> @vpgather_truemask_v4i64(<4 x i64*> %ptrs, i32 zeroext %evl) { ; RV32-LABEL: vpgather_truemask_v4i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; RV32-NEXT: vluxei32.v v10, (zero), v8 ; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: 
vpgather_truemask_v4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; RV64-NEXT: vluxei64.v v8, (zero), v8 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> poison, i1 1, i32 0 @@ -938,7 +938,7 @@ define <8 x i64> @vpgather_baseidx_v8i8_v8i64(i64* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_v8i8_v8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v10, v8 ; RV32-NEXT: vsll.vi v12, v10, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu @@ -947,7 +947,7 @@ ; ; RV64-LABEL: vpgather_baseidx_v8i8_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v12, v8 ; RV64-NEXT: vsll.vi v8, v12, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu @@ -961,7 +961,7 @@ define <8 x i64> @vpgather_baseidx_sext_v8i8_v8i64(i64* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_sext_v8i8_v8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v10, v8 ; RV32-NEXT: vsll.vi v12, v10, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu @@ -970,7 +970,7 @@ ; ; RV64-LABEL: vpgather_baseidx_sext_v8i8_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v12, v8 ; RV64-NEXT: vsll.vi v8, v12, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu @@ -985,7 +985,7 @@ define <8 x i64> @vpgather_baseidx_zext_v8i8_v8i64(i64* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_zext_v8i8_v8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vzext.vf4 v10, v8 ; RV32-NEXT: vsll.vi v12, v10, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu @@ -994,7 +994,7 @@ ; ; RV64-LABEL: vpgather_baseidx_zext_v8i8_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vzext.vf8 v12, v8 ; RV64-NEXT: vsll.vi v8, v12, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu @@ -1009,7 +1009,7 @@ define <8 x i64> @vpgather_baseidx_v8i16_v8i64(i64* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_v8i16_v8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf2 v10, v8 ; RV32-NEXT: vsll.vi v12, v10, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu @@ -1018,7 +1018,7 @@ ; ; RV64-LABEL: vpgather_baseidx_v8i16_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf4 v12, v8 ; RV64-NEXT: vsll.vi v8, v12, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu @@ -1032,7 +1032,7 @@ define <8 x i64> @vpgather_baseidx_sext_v8i16_v8i64(i64* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_sext_v8i16_v8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf2 v10, v8 ; RV32-NEXT: vsll.vi v12, v10, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu @@ -1041,7 +1041,7 @@ ; ; RV64-LABEL: vpgather_baseidx_sext_v8i16_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, 
ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf4 v12, v8 ; RV64-NEXT: vsll.vi v8, v12, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu @@ -1056,7 +1056,7 @@ define <8 x i64> @vpgather_baseidx_zext_v8i16_v8i64(i64* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_zext_v8i16_v8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vzext.vf2 v10, v8 ; RV32-NEXT: vsll.vi v12, v10, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu @@ -1065,7 +1065,7 @@ ; ; RV64-LABEL: vpgather_baseidx_zext_v8i16_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vzext.vf4 v12, v8 ; RV64-NEXT: vsll.vi v8, v12, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu @@ -1080,7 +1080,7 @@ define <8 x i64> @vpgather_baseidx_v8i32_v8i64(i64* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_v8i32_v8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsll.vi v12, v8, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t @@ -1088,7 +1088,7 @@ ; ; RV64-LABEL: vpgather_baseidx_v8i32_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf2 v12, v8 ; RV64-NEXT: vsll.vi v8, v12, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu @@ -1102,7 +1102,7 @@ define <8 x i64> @vpgather_baseidx_sext_v8i32_v8i64(i64* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_sext_v8i32_v8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsll.vi v12, v8, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t @@ -1110,7 +1110,7 @@ ; ; RV64-LABEL: vpgather_baseidx_sext_v8i32_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf2 v12, v8 ; RV64-NEXT: vsll.vi v8, v12, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu @@ -1125,7 +1125,7 @@ define <8 x i64> @vpgather_baseidx_zext_v8i32_v8i64(i64* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_zext_v8i32_v8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsll.vi v12, v8, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t @@ -1133,7 +1133,7 @@ ; ; RV64-LABEL: vpgather_baseidx_zext_v8i32_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vzext.vf2 v12, v8 ; RV64-NEXT: vsll.vi v8, v12, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu @@ -1148,7 +1148,7 @@ define <8 x i64> @vpgather_baseidx_v8i64(i64* %base, <8 x i64> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_v8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vnsrl.wi v12, v8, 0 ; RV32-NEXT: vsll.vi v12, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu @@ -1157,7 +1157,7 @@ ; ; RV64-LABEL: vpgather_baseidx_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, 
e64, m4, ta, ma ; RV64-NEXT: vsll.vi v8, v8, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t @@ -1210,14 +1210,14 @@ define <4 x half> @vpgather_truemask_v4f16(<4 x half*> %ptrs, i32 zeroext %evl) { ; RV32-LABEL: vpgather_truemask_v4f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; RV32-NEXT: vluxei32.v v9, (zero), v8 ; RV32-NEXT: vmv1r.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_truemask_v4f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; RV64-NEXT: vluxei64.v v10, (zero), v8 ; RV64-NEXT: vmv1r.v v8, v10 ; RV64-NEXT: ret @@ -1250,7 +1250,7 @@ define <8 x half> @vpgather_baseidx_v8i8_v8f16(half* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_v8i8_v8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v10, v8 ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu @@ -1259,7 +1259,7 @@ ; ; RV64-LABEL: vpgather_baseidx_v8i8_v8f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v12, v8 ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu @@ -1273,7 +1273,7 @@ define <8 x half> @vpgather_baseidx_sext_v8i8_v8f16(half* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_sext_v8i8_v8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v10, v8 ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu @@ -1282,7 +1282,7 @@ ; ; RV64-LABEL: vpgather_baseidx_sext_v8i8_v8f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v12, v8 ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu @@ -1297,7 +1297,7 @@ define <8 x half> @vpgather_baseidx_zext_v8i8_v8f16(half* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_zext_v8i8_v8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vzext.vf4 v10, v8 ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu @@ -1306,7 +1306,7 @@ ; ; RV64-LABEL: vpgather_baseidx_zext_v8i8_v8f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vzext.vf8 v12, v8 ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu @@ -1321,7 +1321,7 @@ define <8 x half> @vpgather_baseidx_v8f16(half* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_v8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf2 v10, v8 ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu @@ -1330,7 +1330,7 @@ ; ; RV64-LABEL: vpgather_baseidx_v8f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf4 v12, v8 ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu @@ -1382,13 +1382,13 @@ define <4 x float> 
@vpgather_truemask_v4f32(<4 x float*> %ptrs, i32 zeroext %evl) { ; RV32-LABEL: vpgather_truemask_v4f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; RV32-NEXT: vluxei32.v v8, (zero), v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_truemask_v4f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; RV64-NEXT: vluxei64.v v10, (zero), v8 ; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret @@ -1420,7 +1420,7 @@ define <8 x float> @vpgather_baseidx_v8i8_v8f32(float* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_v8i8_v8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v10, v8 ; RV32-NEXT: vsll.vi v8, v10, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -1429,7 +1429,7 @@ ; ; RV64-LABEL: vpgather_baseidx_v8i8_v8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v12, v8 ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -1443,7 +1443,7 @@ define <8 x float> @vpgather_baseidx_sext_v8i8_v8f32(float* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_sext_v8i8_v8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v10, v8 ; RV32-NEXT: vsll.vi v8, v10, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -1452,7 +1452,7 @@ ; ; RV64-LABEL: vpgather_baseidx_sext_v8i8_v8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v12, v8 ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -1467,7 +1467,7 @@ define <8 x float> @vpgather_baseidx_zext_v8i8_v8f32(float* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_zext_v8i8_v8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vzext.vf4 v10, v8 ; RV32-NEXT: vsll.vi v8, v10, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -1476,7 +1476,7 @@ ; ; RV64-LABEL: vpgather_baseidx_zext_v8i8_v8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vzext.vf8 v12, v8 ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -1491,7 +1491,7 @@ define <8 x float> @vpgather_baseidx_v8i16_v8f32(float* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_v8i16_v8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf2 v10, v8 ; RV32-NEXT: vsll.vi v8, v10, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -1500,7 +1500,7 @@ ; ; RV64-LABEL: vpgather_baseidx_v8i16_v8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf4 v12, v8 ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -1514,7 +1514,7 @@ define <8 x float> @vpgather_baseidx_sext_v8i16_v8f32(float* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_sext_v8i16_v8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, 
ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf2 v10, v8 ; RV32-NEXT: vsll.vi v8, v10, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -1523,7 +1523,7 @@ ; ; RV64-LABEL: vpgather_baseidx_sext_v8i16_v8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf4 v12, v8 ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -1538,7 +1538,7 @@ define <8 x float> @vpgather_baseidx_zext_v8i16_v8f32(float* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_zext_v8i16_v8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vzext.vf2 v10, v8 ; RV32-NEXT: vsll.vi v8, v10, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -1547,7 +1547,7 @@ ; ; RV64-LABEL: vpgather_baseidx_zext_v8i16_v8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vzext.vf4 v12, v8 ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -1562,7 +1562,7 @@ define <8 x float> @vpgather_baseidx_v8f32(float* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_v8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsll.vi v8, v8, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t @@ -1570,7 +1570,7 @@ ; ; RV64-LABEL: vpgather_baseidx_v8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf2 v12, v8 ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu @@ -1622,14 +1622,14 @@ define <4 x double> @vpgather_truemask_v4f64(<4 x double*> %ptrs, i32 zeroext %evl) { ; RV32-LABEL: vpgather_truemask_v4f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; RV32-NEXT: vluxei32.v v10, (zero), v8 ; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_truemask_v4f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; RV64-NEXT: vluxei64.v v8, (zero), v8 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> poison, i1 1, i32 0 @@ -1660,7 +1660,7 @@ define <8 x double> @vpgather_baseidx_v8i8_v8f64(double* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_v8i8_v8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v10, v8 ; RV32-NEXT: vsll.vi v12, v10, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu @@ -1669,7 +1669,7 @@ ; ; RV64-LABEL: vpgather_baseidx_v8i8_v8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v12, v8 ; RV64-NEXT: vsll.vi v8, v12, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu @@ -1683,7 +1683,7 @@ define <8 x double> @vpgather_baseidx_sext_v8i8_v8f64(double* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_sext_v8i8_v8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v10, v8 ; RV32-NEXT: vsll.vi v12, v10, 3 ; RV32-NEXT: vsetvli zero, 
a1, e64, m4, ta, mu @@ -1692,7 +1692,7 @@ ; ; RV64-LABEL: vpgather_baseidx_sext_v8i8_v8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v12, v8 ; RV64-NEXT: vsll.vi v8, v12, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu @@ -1707,7 +1707,7 @@ define <8 x double> @vpgather_baseidx_zext_v8i8_v8f64(double* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_zext_v8i8_v8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vzext.vf4 v10, v8 ; RV32-NEXT: vsll.vi v12, v10, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu @@ -1716,7 +1716,7 @@ ; ; RV64-LABEL: vpgather_baseidx_zext_v8i8_v8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vzext.vf8 v12, v8 ; RV64-NEXT: vsll.vi v8, v12, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu @@ -1731,7 +1731,7 @@ define <8 x double> @vpgather_baseidx_v8i16_v8f64(double* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_v8i16_v8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf2 v10, v8 ; RV32-NEXT: vsll.vi v12, v10, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu @@ -1740,7 +1740,7 @@ ; ; RV64-LABEL: vpgather_baseidx_v8i16_v8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf4 v12, v8 ; RV64-NEXT: vsll.vi v8, v12, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu @@ -1754,7 +1754,7 @@ define <8 x double> @vpgather_baseidx_sext_v8i16_v8f64(double* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_sext_v8i16_v8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf2 v10, v8 ; RV32-NEXT: vsll.vi v12, v10, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu @@ -1763,7 +1763,7 @@ ; ; RV64-LABEL: vpgather_baseidx_sext_v8i16_v8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf4 v12, v8 ; RV64-NEXT: vsll.vi v8, v12, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu @@ -1778,7 +1778,7 @@ define <8 x double> @vpgather_baseidx_zext_v8i16_v8f64(double* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_zext_v8i16_v8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vzext.vf2 v10, v8 ; RV32-NEXT: vsll.vi v12, v10, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu @@ -1787,7 +1787,7 @@ ; ; RV64-LABEL: vpgather_baseidx_zext_v8i16_v8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vzext.vf4 v12, v8 ; RV64-NEXT: vsll.vi v8, v12, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu @@ -1802,7 +1802,7 @@ define <8 x double> @vpgather_baseidx_v8i32_v8f64(double* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_v8i32_v8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsll.vi v12, v8, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v12, 
v0.t @@ -1810,7 +1810,7 @@ ; ; RV64-LABEL: vpgather_baseidx_v8i32_v8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf2 v12, v8 ; RV64-NEXT: vsll.vi v8, v12, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu @@ -1824,7 +1824,7 @@ define <8 x double> @vpgather_baseidx_sext_v8i32_v8f64(double* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_sext_v8i32_v8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsll.vi v12, v8, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t @@ -1832,7 +1832,7 @@ ; ; RV64-LABEL: vpgather_baseidx_sext_v8i32_v8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf2 v12, v8 ; RV64-NEXT: vsll.vi v8, v12, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu @@ -1847,7 +1847,7 @@ define <8 x double> @vpgather_baseidx_zext_v8i32_v8f64(double* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_zext_v8i32_v8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsll.vi v12, v8, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t @@ -1855,7 +1855,7 @@ ; ; RV64-LABEL: vpgather_baseidx_zext_v8i32_v8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vzext.vf2 v12, v8 ; RV64-NEXT: vsll.vi v8, v12, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu @@ -1870,7 +1870,7 @@ define <8 x double> @vpgather_baseidx_v8f64(double* %base, <8 x i64> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_v8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vnsrl.wi v12, v8, 0 ; RV32-NEXT: vsll.vi v12, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu @@ -1879,7 +1879,7 @@ ; ; RV64-LABEL: vpgather_baseidx_v8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsll.vi v8, v8, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t @@ -1901,9 +1901,9 @@ ; RV32-NEXT: # %bb.1: ; RV32-NEXT: mv a1, a2 ; RV32-NEXT: .LBB86_2: -; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; RV32-NEXT: vslidedown.vi v24, v8, 16 -; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vi v0, v1, 2 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (zero), v24, v0.t @@ -1927,7 +1927,7 @@ ; RV64-NEXT: # %bb.1: ; RV64-NEXT: mv a1, a2 ; RV64-NEXT: .LBB86_2: -; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64-NEXT: vslidedown.vi v0, v24, 2 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v16, (zero), v16, v0.t @@ -1948,7 +1948,7 @@ ; RV32-LABEL: vpgather_baseidx_v32i8_v32f64: ; RV32: # %bb.0: ; RV32-NEXT: li a2, 32 -; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: vsext.vf4 v16, v8 ; RV32-NEXT: li a3, 16 ; RV32-NEXT: vsll.vi v16, v16, 3 @@ -1965,9 +1965,9 @@ ; RV32-NEXT: # %bb.3: ; RV32-NEXT: mv 
a2, a3 ; RV32-NEXT: .LBB87_4: -; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vi v0, v0, 2 -; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; RV32-NEXT: vslidedown.vi v24, v16, 16 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t @@ -1977,9 +1977,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v10, v0 ; RV64-NEXT: li a2, 0 -; RV64-NEXT: vsetivli zero, 16, e8, m2, ta, mu +; RV64-NEXT: vsetivli zero, 16, e8, m2, ta, ma ; RV64-NEXT: vslidedown.vi v12, v8, 16 -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v12 ; RV64-NEXT: vsll.vi v16, v16, 3 ; RV64-NEXT: addi a3, a1, -16 @@ -1989,7 +1989,7 @@ ; RV64-NEXT: mv a2, a3 ; RV64-NEXT: .LBB87_2: ; RV64-NEXT: vsll.vi v24, v24, 3 -; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64-NEXT: vslidedown.vi v0, v10, 2 ; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t @@ -2011,7 +2011,7 @@ ; RV32-LABEL: vpgather_baseidx_sext_v32i8_v32f64: ; RV32: # %bb.0: ; RV32-NEXT: li a2, 32 -; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: vsext.vf4 v16, v8 ; RV32-NEXT: li a3, 16 ; RV32-NEXT: vsll.vi v16, v16, 3 @@ -2028,9 +2028,9 @@ ; RV32-NEXT: # %bb.3: ; RV32-NEXT: mv a2, a3 ; RV32-NEXT: .LBB88_4: -; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vi v0, v0, 2 -; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; RV32-NEXT: vslidedown.vi v24, v16, 16 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t @@ -2040,9 +2040,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v10, v0 ; RV64-NEXT: li a2, 0 -; RV64-NEXT: vsetivli zero, 16, e8, m2, ta, mu +; RV64-NEXT: vsetivli zero, 16, e8, m2, ta, ma ; RV64-NEXT: vslidedown.vi v12, v8, 16 -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v12 ; RV64-NEXT: vsext.vf8 v24, v8 ; RV64-NEXT: addi a3, a1, -16 @@ -2052,7 +2052,7 @@ ; RV64-NEXT: mv a2, a3 ; RV64-NEXT: .LBB88_2: ; RV64-NEXT: vsll.vi v24, v24, 3 -; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64-NEXT: vslidedown.vi v0, v10, 2 ; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t @@ -2075,7 +2075,7 @@ ; RV32-LABEL: vpgather_baseidx_zext_v32i8_v32f64: ; RV32: # %bb.0: ; RV32-NEXT: li a2, 32 -; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: vzext.vf4 v16, v8 ; RV32-NEXT: li a3, 16 ; RV32-NEXT: vsll.vi v16, v16, 3 @@ -2092,9 +2092,9 @@ ; RV32-NEXT: # %bb.3: ; RV32-NEXT: mv a2, a3 ; RV32-NEXT: .LBB89_4: -; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vi v0, v0, 2 -; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; RV32-NEXT: vslidedown.vi v24, v16, 16 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t @@ -2104,9 +2104,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v10, v0 ; RV64-NEXT: li a2, 0 -; RV64-NEXT: vsetivli zero, 16, e8, m2, ta, mu +; RV64-NEXT: vsetivli zero, 16, e8, 
m2, ta, ma ; RV64-NEXT: vslidedown.vi v12, v8, 16 -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vzext.vf8 v16, v12 ; RV64-NEXT: vzext.vf8 v24, v8 ; RV64-NEXT: addi a3, a1, -16 @@ -2116,7 +2116,7 @@ ; RV64-NEXT: mv a2, a3 ; RV64-NEXT: .LBB89_2: ; RV64-NEXT: vsll.vi v24, v24, 3 -; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64-NEXT: vslidedown.vi v0, v10, 2 ; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t @@ -2139,7 +2139,7 @@ ; RV32-LABEL: vpgather_baseidx_v32i16_v32f64: ; RV32: # %bb.0: ; RV32-NEXT: li a2, 32 -; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: vsext.vf2 v16, v8 ; RV32-NEXT: li a3, 16 ; RV32-NEXT: vsll.vi v16, v16, 3 @@ -2156,9 +2156,9 @@ ; RV32-NEXT: # %bb.3: ; RV32-NEXT: mv a2, a3 ; RV32-NEXT: .LBB90_4: -; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vi v0, v0, 2 -; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; RV32-NEXT: vslidedown.vi v24, v16, 16 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t @@ -2168,9 +2168,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v12, v0 ; RV64-NEXT: li a2, 0 -; RV64-NEXT: vsetivli zero, 16, e16, m4, ta, mu +; RV64-NEXT: vsetivli zero, 16, e16, m4, ta, ma ; RV64-NEXT: vslidedown.vi v16, v8, 16 -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: addi a3, a1, -16 @@ -2180,7 +2180,7 @@ ; RV64-NEXT: mv a2, a3 ; RV64-NEXT: .LBB90_2: ; RV64-NEXT: vsll.vi v24, v24, 3 -; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64-NEXT: vslidedown.vi v0, v12, 2 ; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t @@ -2202,7 +2202,7 @@ ; RV32-LABEL: vpgather_baseidx_sext_v32i16_v32f64: ; RV32: # %bb.0: ; RV32-NEXT: li a2, 32 -; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: vsext.vf2 v16, v8 ; RV32-NEXT: li a3, 16 ; RV32-NEXT: vsll.vi v16, v16, 3 @@ -2219,9 +2219,9 @@ ; RV32-NEXT: # %bb.3: ; RV32-NEXT: mv a2, a3 ; RV32-NEXT: .LBB91_4: -; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vi v0, v0, 2 -; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; RV32-NEXT: vslidedown.vi v24, v16, 16 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t @@ -2231,9 +2231,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v12, v0 ; RV64-NEXT: li a2, 0 -; RV64-NEXT: vsetivli zero, 16, e16, m4, ta, mu +; RV64-NEXT: vsetivli zero, 16, e16, m4, ta, ma ; RV64-NEXT: vslidedown.vi v16, v8, 16 -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v0, v16 ; RV64-NEXT: vsext.vf4 v24, v8 ; RV64-NEXT: addi a3, a1, -16 @@ -2243,7 +2243,7 @@ ; RV64-NEXT: mv a2, a3 ; RV64-NEXT: .LBB91_2: ; RV64-NEXT: vsll.vi v24, v24, 3 -; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64-NEXT: vslidedown.vi v0, v12, 2 ; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t @@ 
-2266,7 +2266,7 @@ ; RV32-LABEL: vpgather_baseidx_zext_v32i16_v32f64: ; RV32: # %bb.0: ; RV32-NEXT: li a2, 32 -; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: vzext.vf2 v16, v8 ; RV32-NEXT: li a3, 16 ; RV32-NEXT: vsll.vi v16, v16, 3 @@ -2283,9 +2283,9 @@ ; RV32-NEXT: # %bb.3: ; RV32-NEXT: mv a2, a3 ; RV32-NEXT: .LBB92_4: -; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vi v0, v0, 2 -; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; RV32-NEXT: vslidedown.vi v24, v16, 16 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t @@ -2295,9 +2295,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v12, v0 ; RV64-NEXT: li a2, 0 -; RV64-NEXT: vsetivli zero, 16, e16, m4, ta, mu +; RV64-NEXT: vsetivli zero, 16, e16, m4, ta, ma ; RV64-NEXT: vslidedown.vi v16, v8, 16 -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vzext.vf4 v0, v16 ; RV64-NEXT: vzext.vf4 v24, v8 ; RV64-NEXT: addi a3, a1, -16 @@ -2307,7 +2307,7 @@ ; RV64-NEXT: mv a2, a3 ; RV64-NEXT: .LBB92_2: ; RV64-NEXT: vsll.vi v24, v24, 3 -; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64-NEXT: vslidedown.vi v0, v12, 2 ; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t @@ -2330,7 +2330,7 @@ ; RV32-LABEL: vpgather_baseidx_v32i32_v32f64: ; RV32: # %bb.0: ; RV32-NEXT: li a2, 32 -; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: li a3, 16 ; RV32-NEXT: vsll.vi v16, v8, 3 ; RV32-NEXT: mv a2, a1 @@ -2346,9 +2346,9 @@ ; RV32-NEXT: # %bb.3: ; RV32-NEXT: mv a2, a3 ; RV32-NEXT: .LBB93_4: -; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; RV32-NEXT: vslidedown.vi v24, v16, 16 -; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vi v0, v0, 2 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t @@ -2358,9 +2358,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v24, v0 ; RV64-NEXT: li a2, 0 -; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; RV64-NEXT: vslidedown.vi v16, v8, 16 -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vsext.vf2 v0, v16 ; RV64-NEXT: vsll.vi v16, v0, 3 ; RV64-NEXT: addi a3, a1, -16 @@ -2370,7 +2370,7 @@ ; RV64-NEXT: mv a2, a3 ; RV64-NEXT: .LBB93_2: ; RV64-NEXT: vsll.vi v8, v0, 3 -; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64-NEXT: vslidedown.vi v0, v24, 2 ; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t @@ -2392,7 +2392,7 @@ ; RV32-LABEL: vpgather_baseidx_sext_v32i32_v32f64: ; RV32: # %bb.0: ; RV32-NEXT: li a2, 32 -; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: li a3, 16 ; RV32-NEXT: vsll.vi v16, v8, 3 ; RV32-NEXT: mv a2, a1 @@ -2408,9 +2408,9 @@ ; RV32-NEXT: # %bb.3: ; RV32-NEXT: mv a2, a3 ; RV32-NEXT: .LBB94_4: -; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; RV32-NEXT: vslidedown.vi v24, v16, 16 -; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV32-NEXT: vsetivli 
zero, 2, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vi v0, v0, 2 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t @@ -2426,9 +2426,9 @@ ; RV64-NEXT: addi a2, sp, 16 ; RV64-NEXT: vs1r.v v0, (a2) # Unknown-size Folded Spill ; RV64-NEXT: li a2, 0 -; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; RV64-NEXT: vslidedown.vi v16, v8, 16 -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vsext.vf2 v24, v16 ; RV64-NEXT: vsext.vf2 v0, v8 ; RV64-NEXT: addi a3, a1, -16 @@ -2438,7 +2438,7 @@ ; RV64-NEXT: mv a2, a3 ; RV64-NEXT: .LBB94_2: ; RV64-NEXT: vsll.vi v8, v0, 3 -; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64-NEXT: addi a3, sp, 16 ; RV64-NEXT: vl1r.v v24, (a3) # Unknown-size Folded Reload ; RV64-NEXT: vslidedown.vi v0, v24, 2 @@ -2467,7 +2467,7 @@ ; RV32-LABEL: vpgather_baseidx_zext_v32i32_v32f64: ; RV32: # %bb.0: ; RV32-NEXT: li a2, 32 -; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: li a3, 16 ; RV32-NEXT: vsll.vi v16, v8, 3 ; RV32-NEXT: mv a2, a1 @@ -2483,9 +2483,9 @@ ; RV32-NEXT: # %bb.3: ; RV32-NEXT: mv a2, a3 ; RV32-NEXT: .LBB95_4: -; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; RV32-NEXT: vslidedown.vi v24, v16, 16 -; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vi v0, v0, 2 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t @@ -2501,9 +2501,9 @@ ; RV64-NEXT: addi a2, sp, 16 ; RV64-NEXT: vs1r.v v0, (a2) # Unknown-size Folded Spill ; RV64-NEXT: li a2, 0 -; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; RV64-NEXT: vslidedown.vi v16, v8, 16 -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vzext.vf2 v24, v16 ; RV64-NEXT: vzext.vf2 v0, v8 ; RV64-NEXT: addi a3, a1, -16 @@ -2513,7 +2513,7 @@ ; RV64-NEXT: mv a2, a3 ; RV64-NEXT: .LBB95_2: ; RV64-NEXT: vsll.vi v8, v0, 3 -; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64-NEXT: addi a3, sp, 16 ; RV64-NEXT: vl1r.v v24, (a3) # Unknown-size Folded Reload ; RV64-NEXT: vslidedown.vi v0, v24, 2 @@ -2543,22 +2543,22 @@ ; RV32: # %bb.0: ; RV32-NEXT: vmv1r.v v1, v0 ; RV32-NEXT: li a2, 0 -; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; RV32-NEXT: vnsrl.wi v24, v16, 0 ; RV32-NEXT: vnsrl.wi v16, v8, 0 ; RV32-NEXT: li a3, 32 -; RV32-NEXT: vsetvli zero, a3, e32, m8, tu, mu +; RV32-NEXT: vsetvli zero, a3, e32, m8, tu, ma ; RV32-NEXT: vslideup.vi v16, v24, 16 -; RV32-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; RV32-NEXT: vsetvli zero, zero, e32, m8, ta, ma ; RV32-NEXT: addi a3, a1, -16 ; RV32-NEXT: vsll.vi v24, v16, 3 ; RV32-NEXT: bltu a1, a3, .LBB96_2 ; RV32-NEXT: # %bb.1: ; RV32-NEXT: mv a2, a3 ; RV32-NEXT: .LBB96_2: -; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; RV32-NEXT: vslidedown.vi v8, v24, 16 -; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vi v0, v1, 2 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t @@ -2576,7 +2576,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v24, 
v0 ; RV64-NEXT: li a2, 0 -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: addi a3, a1, -16 ; RV64-NEXT: vsll.vi v16, v16, 3 ; RV64-NEXT: bltu a1, a3, .LBB96_2 @@ -2584,7 +2584,7 @@ ; RV64-NEXT: mv a2, a3 ; RV64-NEXT: .LBB96_2: ; RV64-NEXT: vsll.vi v8, v8, 3 -; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64-NEXT: vslidedown.vi v0, v24, 2 ; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll @@ -43,7 +43,7 @@ define <4 x i8> @vpload_v4i8_allones_mask(<4 x i8>* %ptr, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v4i8_allones_mask: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: ret %a = insertelement <4 x i1> poison, i1 true, i32 0 @@ -103,7 +103,7 @@ define <8 x i16> @vpload_v8i16_allones_mask(<8 x i16>* %ptr, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v8i16_allones_mask: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: ret %a = insertelement <8 x i1> poison, i1 true, i32 0 @@ -151,7 +151,7 @@ define <6 x i32> @vpload_v6i32_allones_mask(<6 x i32>* %ptr, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v6i32_allones_mask: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: ret %a = insertelement <6 x i1> poison, i1 true, i32 0 @@ -175,7 +175,7 @@ define <8 x i32> @vpload_v8i32_allones_mask(<8 x i32>* %ptr, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v8i32_allones_mask: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: ret %a = insertelement <8 x i1> poison, i1 true, i32 0 @@ -211,7 +211,7 @@ define <4 x i64> @vpload_v4i64_allones_mask(<4 x i64>* %ptr, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v4i64_allones_mask: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: ret %a = insertelement <4 x i1> poison, i1 true, i32 0 @@ -247,7 +247,7 @@ define <2 x half> @vpload_v2f16_allones_mask(<2 x half>* %ptr, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v2f16_allones_mask: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: ret %a = insertelement <2 x i1> poison, i1 true, i32 0 @@ -319,7 +319,7 @@ define <8 x float> @vpload_v8f32_allones_mask(<8 x float>* %ptr, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v8f32_allones_mask: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: ret %a = insertelement <8 x i1> poison, i1 true, i32 0 @@ -355,7 +355,7 @@ define <4 x double> @vpload_v4f64_allones_mask(<4 x double>* %ptr, i32 zeroext %evl) { ; CHECK-LABEL: vpload_v4f64_allones_mask: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, 
ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: ret %a = insertelement <4 x i1> poison, i1 true, i32 0 @@ -388,7 +388,7 @@ ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a2, a3 ; CHECK-NEXT: .LBB31_2: -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v0, v8, 2 ; CHECK-NEXT: addi a3, a0, 128 ; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu @@ -426,7 +426,7 @@ ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: mv a4, a5 ; CHECK-NEXT: .LBB32_4: -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v0, v8, 2 ; CHECK-NEXT: addi a5, a1, 128 ; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, mu @@ -442,7 +442,7 @@ ; CHECK-NEXT: # %bb.7: ; CHECK-NEXT: li a4, 16 ; CHECK-NEXT: .LBB32_8: -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v0, v8, 4 ; CHECK-NEXT: addi a5, a1, 256 ; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, mu @@ -454,13 +454,13 @@ ; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, mu ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vle64.v v8, (a1), v0.t -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vse64.v v8, (a0) ; CHECK-NEXT: addi a1, a0, 256 -; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, ma ; CHECK-NEXT: vse64.v v24, (a1) ; CHECK-NEXT: addi a0, a0, 128 -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vse64.v v16, (a0) ; CHECK-NEXT: ret %load = call <33 x double> @llvm.vp.load.v33f64.p0v33f64(<33 x double>* %ptr, <33 x i1> %m, i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll @@ -9,7 +9,7 @@ define <4 x i1> @vpmerge_vv_v4i1(<4 x i1> %va, <4 x i1> %vb, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpmerge_vv_v4i1: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vid.v v10 ; RV32-NEXT: vmsltu.vx v10, v10, a0 ; RV32-NEXT: vmand.mm v9, v9, v10 @@ -20,7 +20,7 @@ ; ; RV64-LABEL: vpmerge_vv_v4i1: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-NEXT: vid.v v10 ; RV64-NEXT: vmsltu.vx v12, v10, a0 ; RV64-NEXT: vmand.mm v9, v9, v12 @@ -37,7 +37,7 @@ define <2 x i8> @vpmerge_vv_v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vmerge.vvm v9, v9, v8, v0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -48,7 +48,7 @@ define <2 x i8> @vpmerge_vx_v2i8(i8 %a, <2 x i8> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vx_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 %a, i32 0 @@ -60,7 +60,7 @@ define <2 x i8> @vpmerge_vi_v2i8(<2 x i8> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: 
vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 2, i32 0 @@ -74,7 +74,7 @@ define <4 x i8> @vpmerge_vv_v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vmerge.vvm v9, v9, v8, v0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -85,7 +85,7 @@ define <4 x i8> @vpmerge_vx_v4i8(i8 %a, <4 x i8> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vx_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 %a, i32 0 @@ -97,7 +97,7 @@ define <4 x i8> @vpmerge_vi_v4i8(<4 x i8> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 2, i32 0 @@ -111,7 +111,7 @@ define <6 x i8> @vpmerge_vv_v6i8(<6 x i8> %va, <6 x i8> %vb, <6 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v6i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vmerge.vvm v9, v9, v8, v0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -122,7 +122,7 @@ define <6 x i8> @vpmerge_vx_v6i8(i8 %a, <6 x i8> %vb, <6 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vx_v6i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %elt.head = insertelement <6 x i8> poison, i8 %a, i32 0 @@ -134,7 +134,7 @@ define <6 x i8> @vpmerge_vi_v6i8(<6 x i8> %vb, <6 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_v6i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement <6 x i8> poison, i8 2, i32 0 @@ -148,7 +148,7 @@ define <8 x i7> @vpmerge_vv_v8i7(<8 x i7> %va, <8 x i7> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v8i7: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vmerge.vvm v9, v9, v8, v0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -159,7 +159,7 @@ define <8 x i7> @vpmerge_vx_v8i7(i7 %a, <8 x i7> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vx_v8i7: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i7> poison, i7 %a, i32 0 @@ -171,7 +171,7 @@ define <8 x i7> @vpmerge_vi_v8i7(<8 x i7> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_v8i7: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i7> poison, i7 2, i32 0 @@ -185,7 +185,7 @@ define <8 x i8> @vpmerge_vv_v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: 
vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vmerge.vvm v9, v9, v8, v0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -196,7 +196,7 @@ define <8 x i8> @vpmerge_vx_v8i8(i8 %a, <8 x i8> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vx_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 %a, i32 0 @@ -208,7 +208,7 @@ define <8 x i8> @vpmerge_vi_v8i8(<8 x i8> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 2, i32 0 @@ -222,7 +222,7 @@ define <16 x i8> @vpmerge_vv_v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vmerge.vvm v9, v9, v8, v0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -233,7 +233,7 @@ define <16 x i8> @vpmerge_vx_v16i8(i8 %a, <16 x i8> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vx_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 %a, i32 0 @@ -245,7 +245,7 @@ define <16 x i8> @vpmerge_vi_v16i8(<16 x i8> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 2, i32 0 @@ -259,7 +259,7 @@ define <2 x i16> @vpmerge_vv_v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vmerge.vvm v9, v9, v8, v0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -270,7 +270,7 @@ define <2 x i16> @vpmerge_vx_v2i16(i16 %a, <2 x i16> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vx_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 %a, i32 0 @@ -282,7 +282,7 @@ define <2 x i16> @vpmerge_vi_v2i16(<2 x i16> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 2, i32 0 @@ -296,7 +296,7 @@ define <4 x i16> @vpmerge_vv_v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vmerge.vvm v9, v9, v8, v0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -307,7 +307,7 @@ define <4 x i16> @vpmerge_vx_v4i16(i16 %a, <4 x i16> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vx_v4i16: ; CHECK: # 
%bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 %a, i32 0 @@ -319,7 +319,7 @@ define <4 x i16> @vpmerge_vi_v4i16(<4 x i16> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 2, i32 0 @@ -333,7 +333,7 @@ define <8 x i16> @vpmerge_vv_v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vmerge.vvm v9, v9, v8, v0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -344,7 +344,7 @@ define <8 x i16> @vpmerge_vx_v8i16(i16 %a, <8 x i16> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vx_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 %a, i32 0 @@ -356,7 +356,7 @@ define <8 x i16> @vpmerge_vi_v8i16(<8 x i16> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 2, i32 0 @@ -370,7 +370,7 @@ define <16 x i16> @vpmerge_vv_v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vmerge.vvm v10, v10, v8, v0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -381,7 +381,7 @@ define <16 x i16> @vpmerge_vx_v16i16(i16 %a, <16 x i16> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vx_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 %a, i32 0 @@ -393,7 +393,7 @@ define <16 x i16> @vpmerge_vi_v16i16(<16 x i16> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 2, i32 0 @@ -407,7 +407,7 @@ define <2 x i32> @vpmerge_vv_v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vmerge.vvm v9, v9, v8, v0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -418,7 +418,7 @@ define <2 x i32> @vpmerge_vx_v2i32(i32 %a, <2 x i32> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vx_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 %a, i32 0 @@ -430,7 +430,7 
@@ define <2 x i32> @vpmerge_vi_v2i32(<2 x i32> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 2, i32 0 @@ -444,7 +444,7 @@ define <4 x i32> @vpmerge_vv_v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vmerge.vvm v9, v9, v8, v0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -455,7 +455,7 @@ define <4 x i32> @vpmerge_vx_v4i32(i32 %a, <4 x i32> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vx_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 %a, i32 0 @@ -467,7 +467,7 @@ define <4 x i32> @vpmerge_vi_v4i32(<4 x i32> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 2, i32 0 @@ -481,7 +481,7 @@ define <8 x i32> @vpmerge_vv_v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vmerge.vvm v10, v10, v8, v0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -492,7 +492,7 @@ define <8 x i32> @vpmerge_vx_v8i32(i32 %a, <8 x i32> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vx_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 %a, i32 0 @@ -504,7 +504,7 @@ define <8 x i32> @vpmerge_vi_v8i32(<8 x i32> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 2, i32 0 @@ -518,7 +518,7 @@ define <16 x i32> @vpmerge_vv_v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vmerge.vvm v12, v12, v8, v0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -529,7 +529,7 @@ define <16 x i32> @vpmerge_vx_v16i32(i32 %a, <16 x i32> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vx_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 %a, i32 0 @@ -541,7 +541,7 @@ define <16 x i32> @vpmerge_vi_v16i32(<16 x i32> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; 
CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 2, i32 0 @@ -555,7 +555,7 @@ define <2 x i64> @vpmerge_vv_v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vmerge.vvm v9, v9, v8, v0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -571,16 +571,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, ma ; RV32-NEXT: vmerge.vvm v8, v8, v9, v0 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vpmerge_vx_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma ; RV64-NEXT: vmerge.vxm v8, v8, a0, v0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 %a, i32 0 @@ -592,7 +592,7 @@ define <2 x i64> @vpmerge_vi_v2i64(<2 x i64> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 2, i32 0 @@ -606,7 +606,7 @@ define <4 x i64> @vpmerge_vv_v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vmerge.vvm v10, v10, v8, v0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -622,16 +622,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, ma ; RV32-NEXT: vmerge.vvm v8, v8, v10, v0 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vpmerge_vx_v4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, ma ; RV64-NEXT: vmerge.vxm v8, v8, a0, v0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 %a, i32 0 @@ -643,7 +643,7 @@ define <4 x i64> @vpmerge_vi_v4i64(<4 x i64> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 2, i32 0 @@ -657,7 +657,7 @@ define <8 x i64> @vpmerge_vv_v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vmerge.vvm v12, v12, v8, v0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -673,16 +673,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, 
mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, ma ; RV32-NEXT: vmerge.vvm v8, v8, v12, v0 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vpmerge_vx_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, ma ; RV64-NEXT: vmerge.vxm v8, v8, a0, v0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 %a, i32 0 @@ -694,7 +694,7 @@ define <8 x i64> @vpmerge_vi_v8i64(<8 x i64> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_v8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 2, i32 0 @@ -708,7 +708,7 @@ define <16 x i64> @vpmerge_vv_v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v16i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vmerge.vvm v16, v16, v8, v0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -724,16 +724,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, tu, ma ; RV32-NEXT: vmerge.vvm v8, v8, v16, v0 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vpmerge_vx_v16i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, tu, ma ; RV64-NEXT: vmerge.vxm v8, v8, a0, v0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 %a, i32 0 @@ -745,7 +745,7 @@ define <16 x i64> @vpmerge_vi_v16i64(<16 x i64> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_v16i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 2, i32 0 @@ -759,7 +759,7 @@ define <2 x half> @vpmerge_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vmerge.vvm v9, v9, v8, v0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -770,7 +770,7 @@ define <2 x half> @vpmerge_vf_v2f16(half %a, <2 x half> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vf_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x half> poison, half %a, i32 0 @@ -784,7 +784,7 @@ define <4 x half> @vpmerge_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vmerge.vvm v9, v9, v8, v0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -795,7 +795,7 @@ define <4 x half> @vpmerge_vf_v4f16(half %a, <4 x half> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vf_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; 
CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x half> poison, half %a, i32 0 @@ -809,7 +809,7 @@ define <8 x half> @vpmerge_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vmerge.vvm v9, v9, v8, v0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -820,7 +820,7 @@ define <8 x half> @vpmerge_vf_v8f16(half %a, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vf_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x half> poison, half %a, i32 0 @@ -834,7 +834,7 @@ define <16 x half> @vpmerge_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vmerge.vvm v10, v10, v8, v0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -845,7 +845,7 @@ define <16 x half> @vpmerge_vf_v16f16(half %a, <16 x half> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vf_v16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x half> poison, half %a, i32 0 @@ -859,7 +859,7 @@ define <2 x float> @vpmerge_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vmerge.vvm v9, v9, v8, v0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -870,7 +870,7 @@ define <2 x float> @vpmerge_vf_v2f32(float %a, <2 x float> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vf_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x float> poison, float %a, i32 0 @@ -884,7 +884,7 @@ define <4 x float> @vpmerge_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vmerge.vvm v9, v9, v8, v0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -895,7 +895,7 @@ define <4 x float> @vpmerge_vf_v4f32(float %a, <4 x float> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vf_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x float> poison, float %a, i32 0 @@ -909,7 +909,7 @@ define <8 x float> @vpmerge_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vmerge.vvm v10, v10, v8, v0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -920,7 +920,7 @@ define <8 x float> @vpmerge_vf_v8f32(float %a, <8 x float> 
%vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vf_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x float> poison, float %a, i32 0 @@ -934,7 +934,7 @@ define <16 x float> @vpmerge_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vmerge.vvm v12, v12, v8, v0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -945,7 +945,7 @@ define <16 x float> @vpmerge_vf_v16f32(float %a, <16 x float> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vf_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x float> poison, float %a, i32 0 @@ -959,7 +959,7 @@ define <2 x double> @vpmerge_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vmerge.vvm v9, v9, v8, v0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -970,7 +970,7 @@ define <2 x double> @vpmerge_vf_v2f64(double %a, <2 x double> %vb, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vf_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x double> poison, double %a, i32 0 @@ -984,7 +984,7 @@ define <4 x double> @vpmerge_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vmerge.vvm v10, v10, v8, v0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -995,7 +995,7 @@ define <4 x double> @vpmerge_vf_v4f64(double %a, <4 x double> %vb, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vf_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x double> poison, double %a, i32 0 @@ -1009,7 +1009,7 @@ define <8 x double> @vpmerge_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vmerge.vvm v12, v12, v8, v0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1020,7 +1020,7 @@ define <8 x double> @vpmerge_vf_v8f64(double %a, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vf_v8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x double> poison, double %a, i32 0 @@ -1034,7 +1034,7 @@ define <16 x double> @vpmerge_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_v16f64: ; CHECK: # %bb.0: -; CHECK-NEXT: 
vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vmerge.vvm v16, v16, v8, v0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1045,7 +1045,7 @@ define <16 x double> @vpmerge_vf_v16f64(double %a, <16 x double> %vb, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vf_v16f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x double> poison, double %a, i32 0 @@ -1066,7 +1066,7 @@ ; RV32-NEXT: mul a1, a1, a3 ; RV32-NEXT: sub sp, sp, a1 ; RV32-NEXT: addi a1, a0, 128 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vle64.v v24, (a1) ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 3 @@ -1088,9 +1088,9 @@ ; RV32-NEXT: mv a1, a3 ; RV32-NEXT: .LBB79_2: ; RV32-NEXT: vle64.v v8, (a0) -; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vi v0, v1, 2 -; RV32-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, tu, ma ; RV32-NEXT: li a0, 16 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 4 @@ -1107,7 +1107,7 @@ ; RV32-NEXT: # %bb.3: ; RV32-NEXT: li a2, 16 ; RV32-NEXT: .LBB79_4: -; RV32-NEXT: vsetvli zero, a2, e64, m8, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, tu, ma ; RV32-NEXT: vmv1r.v v0, v1 ; RV32-NEXT: addi a0, sp, 16 ; RV32-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload @@ -1127,7 +1127,7 @@ ; RV64-NEXT: slli a1, a1, 4 ; RV64-NEXT: sub sp, sp, a1 ; RV64-NEXT: addi a1, a0, 128 -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vle64.v v24, (a1) ; RV64-NEXT: addi a3, a2, -16 ; RV64-NEXT: addi a1, sp, 16 @@ -1144,9 +1144,9 @@ ; RV64-NEXT: mv a1, a3 ; RV64-NEXT: .LBB79_2: ; RV64-NEXT: vle64.v v8, (a0) -; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64-NEXT: vslidedown.vi v0, v1, 2 -; RV64-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, tu, ma ; RV64-NEXT: li a0, 16 ; RV64-NEXT: addi a1, sp, 16 ; RV64-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload @@ -1155,7 +1155,7 @@ ; RV64-NEXT: # %bb.3: ; RV64-NEXT: li a2, 16 ; RV64-NEXT: .LBB79_4: -; RV64-NEXT: vsetvli zero, a2, e64, m8, tu, mu +; RV64-NEXT: vsetvli zero, a2, e64, m8, tu, ma ; RV64-NEXT: vmv1r.v v0, v1 ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 3 @@ -1183,16 +1183,16 @@ ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a1, a2 ; CHECK-NEXT: .LBB80_2: -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v0, v24, 2 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, ma ; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: vfmerge.vfm v16, v16, fa0, v0 ; CHECK-NEXT: bltu a0, a1, .LBB80_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: .LBB80_4: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll @@ -9,13 +9,13 @@ define void 
@vpscatter_v2i8(<2 x i8> %val, <2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v2i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v2i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.v2i8.v2p0i8(<2 x i8> %val, <2 x i8*> %ptrs, <2 x i1> %m, i32 %evl) @@ -25,17 +25,17 @@ define void @vpscatter_v2i16_truncstore_v2i8(<2 x i16> %val, <2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v2i16_truncstore_v2i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; RV32-NEXT: vnsrl.wi v8, v8, 0 -; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v2i16_truncstore_v2i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; RV64-NEXT: vnsrl.wi v8, v8, 0 -; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret %tval = trunc <2 x i16> %val to <2 x i8> @@ -46,21 +46,21 @@ define void @vpscatter_v2i32_truncstore_v2i8(<2 x i32> %val, <2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v2i32_truncstore_v2i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; RV32-NEXT: vnsrl.wi v8, v8, 0 -; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; RV32-NEXT: vnsrl.wi v8, v8, 0 -; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v2i32_truncstore_v2i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; RV64-NEXT: vnsrl.wi v8, v8, 0 -; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; RV64-NEXT: vnsrl.wi v8, v8, 0 -; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret %tval = trunc <2 x i32> %val to <2 x i8> @@ -71,25 +71,25 @@ define void @vpscatter_v2i64_truncstore_v2i8(<2 x i64> %val, <2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v2i64_truncstore_v2i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32-NEXT: vnsrl.wi v8, v8, 0 -; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; RV32-NEXT: vnsrl.wi v8, v8, 0 -; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; RV32-NEXT: vnsrl.wi v8, v8, 0 -; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v2i64_truncstore_v2i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV64-NEXT: vnsrl.wi v8, v8, 0 -; 
RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; RV64-NEXT: vnsrl.wi v8, v8, 0 -; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; RV64-NEXT: vnsrl.wi v8, v8, 0 -; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret %tval = trunc <2 x i64> %val to <2 x i8> @@ -102,13 +102,13 @@ define void @vpscatter_v4i8(<4 x i8> %val, <4 x i8*> %ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v4i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v4i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.v4i8.v4p0i8(<4 x i8> %val, <4 x i8*> %ptrs, <4 x i1> %m, i32 %evl) @@ -118,13 +118,13 @@ define void @vpscatter_truemask_v4i8(<4 x i8> %val, <4 x i8*> %ptrs, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_truemask_v4i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_truemask_v4i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> poison, i1 1, i32 0 @@ -138,13 +138,13 @@ define void @vpscatter_v8i8(<8 x i8> %val, <8 x i8*> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v8i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v8i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.v8i8.v8p0i8(<8 x i8> %val, <8 x i8*> %ptrs, <8 x i1> %m, i32 %evl) @@ -154,17 +154,17 @@ define void @vpscatter_baseidx_v8i8(<8 x i8> %val, i8* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_v8i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v10, v9 -; RV32-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; RV32-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_v8i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v12, v9 -; RV64-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds i8, i8* %base, <8 x i8> %idxs @@ -177,13 +177,13 @@ define void @vpscatter_v2i16(<2 x i16> %val, <2 x i16*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v2i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v2i16: ; RV64: # %bb.0: -; RV64-NEXT: 
vsetvli zero, a0, e16, mf4, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.v2i16.v2p0i16(<2 x i16> %val, <2 x i16*> %ptrs, <2 x i1> %m, i32 %evl) @@ -193,17 +193,17 @@ define void @vpscatter_v2i32_truncstore_v2i16(<2 x i32> %val, <2 x i16*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v2i32_truncstore_v2i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; RV32-NEXT: vnsrl.wi v8, v8, 0 -; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v2i32_truncstore_v2i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; RV64-NEXT: vnsrl.wi v8, v8, 0 -; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret %tval = trunc <2 x i32> %val to <2 x i16> @@ -214,21 +214,21 @@ define void @vpscatter_v2i64_truncstore_v2i16(<2 x i64> %val, <2 x i16*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v2i64_truncstore_v2i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32-NEXT: vnsrl.wi v8, v8, 0 -; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; RV32-NEXT: vnsrl.wi v8, v8, 0 -; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v2i64_truncstore_v2i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV64-NEXT: vnsrl.wi v8, v8, 0 -; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; RV64-NEXT: vnsrl.wi v8, v8, 0 -; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret %tval = trunc <2 x i64> %val to <2 x i16> @@ -241,13 +241,13 @@ define void @vpscatter_v3i16(<3 x i16> %val, <3 x i16*> %ptrs, <3 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v3i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v3i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.v3i16.v3p0i16(<3 x i16> %val, <3 x i16*> %ptrs, <3 x i1> %m, i32 %evl) @@ -257,13 +257,13 @@ define void @vpscatter_truemask_v3i16(<3 x i16> %val, <3 x i16*> %ptrs, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_truemask_v3i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_truemask_v3i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10 ; RV64-NEXT: ret %mhead = insertelement <3 x i1> poison, i1 1, i32 0 @@ -277,13 +277,13 @@ define void 
@vpscatter_v4i16(<4 x i16> %val, <4 x i16*> %ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v4i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v4i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.v4i16.v4p0i16(<4 x i16> %val, <4 x i16*> %ptrs, <4 x i1> %m, i32 %evl) @@ -293,13 +293,13 @@ define void @vpscatter_truemask_v4i16(<4 x i16> %val, <4 x i16*> %ptrs, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_truemask_v4i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_truemask_v4i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> poison, i1 1, i32 0 @@ -313,13 +313,13 @@ define void @vpscatter_v8i16(<8 x i16> %val, <8 x i16*> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.v8i16.v8p0i16(<8 x i16> %val, <8 x i16*> %ptrs, <8 x i1> %m, i32 %evl) @@ -329,19 +329,19 @@ define void @vpscatter_baseidx_v8i8_v8i16(<8 x i16> %val, i16* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_v8i8_v8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v10, v9 ; RV32-NEXT: vadd.vv v10, v10, v10 -; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_v8i8_v8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v12, v9 ; RV64-NEXT: vadd.vv v12, v12, v12 -; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds i16, i16* %base, <8 x i8> %idxs @@ -352,19 +352,19 @@ define void @vpscatter_baseidx_sext_v8i8_v8i16(<8 x i16> %val, i16* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_sext_v8i8_v8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v10, v9 ; RV32-NEXT: vadd.vv v10, v10, v10 -; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_v8i8_v8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v12, v9 ; RV64-NEXT: vadd.vv v12, v12, v12 -; 
RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %eidxs = sext <8 x i8> %idxs to <8 x i16> @@ -376,19 +376,19 @@ define void @vpscatter_baseidx_zext_v8i8_v8i16(<8 x i16> %val, i16* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_zext_v8i8_v8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vzext.vf4 v10, v9 ; RV32-NEXT: vadd.vv v10, v10, v10 -; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_zext_v8i8_v8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vzext.vf8 v12, v9 ; RV64-NEXT: vadd.vv v12, v12, v12 -; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i16> @@ -400,19 +400,19 @@ define void @vpscatter_baseidx_v8i16(<8 x i16> %val, i16* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_v8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf2 v10, v9 ; RV32-NEXT: vadd.vv v10, v10, v10 -; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_v8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf4 v12, v9 ; RV64-NEXT: vadd.vv v12, v12, v12 -; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> %idxs @@ -425,13 +425,13 @@ define void @vpscatter_v2i32(<2 x i32> %val, <2 x i32*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v2i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v2i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.v2i32.v2p0i32(<2 x i32> %val, <2 x i32*> %ptrs, <2 x i1> %m, i32 %evl) @@ -441,17 +441,17 @@ define void @vpscatter_v2i64_truncstore_v2i32(<2 x i64> %val, <2 x i32*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v2i64_truncstore_v2i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32-NEXT: vnsrl.wi v8, v8, 0 -; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v2i64_truncstore_v2i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV64-NEXT: vnsrl.wi v8, v8, 0 -; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; RV64-NEXT: vsoxei64.v 
v8, (zero), v9, v0.t ; RV64-NEXT: ret %tval = trunc <2 x i64> %val to <2 x i32> @@ -464,13 +464,13 @@ define void @vpscatter_v4i32(<4 x i32> %val, <4 x i32*> %ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v4i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v4i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.v4i32.v4p0i32(<4 x i32> %val, <4 x i32*> %ptrs, <4 x i1> %m, i32 %evl) @@ -480,13 +480,13 @@ define void @vpscatter_truemask_v4i32(<4 x i32> %val, <4 x i32*> %ptrs, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_truemask_v4i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_truemask_v4i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> poison, i1 1, i32 0 @@ -500,13 +500,13 @@ define void @vpscatter_v8i32(<8 x i32> %val, <8 x i32*> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.v8i32.v8p0i32(<8 x i32> %val, <8 x i32*> %ptrs, <8 x i1> %m, i32 %evl) @@ -516,19 +516,19 @@ define void @vpscatter_baseidx_v8i8_v8i32(<8 x i32> %val, i32* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_v8i8_v8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v12, v10 ; RV32-NEXT: vsll.vi v10, v12, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_v8i8_v8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v12, v10 ; RV64-NEXT: vsll.vi v12, v12, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, <8 x i8> %idxs @@ -539,19 +539,19 @@ define void @vpscatter_baseidx_sext_v8i8_v8i32(<8 x i32> %val, i32* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_sext_v8i8_v8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v12, v10 ; RV32-NEXT: vsll.vi v10, v12, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_v8i8_v8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: 
vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v12, v10 ; RV64-NEXT: vsll.vi v12, v12, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %eidxs = sext <8 x i8> %idxs to <8 x i32> @@ -563,19 +563,19 @@ define void @vpscatter_baseidx_zext_v8i8_v8i32(<8 x i32> %val, i32* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_zext_v8i8_v8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vzext.vf4 v12, v10 ; RV32-NEXT: vsll.vi v10, v12, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_zext_v8i8_v8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vzext.vf8 v12, v10 ; RV64-NEXT: vsll.vi v12, v12, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i32> @@ -587,19 +587,19 @@ define void @vpscatter_baseidx_v8i16_v8i32(<8 x i32> %val, i32* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_v8i16_v8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf2 v12, v10 ; RV32-NEXT: vsll.vi v10, v12, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_v8i16_v8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf4 v12, v10 ; RV64-NEXT: vsll.vi v12, v12, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, <8 x i16> %idxs @@ -610,19 +610,19 @@ define void @vpscatter_baseidx_sext_v8i16_v8i32(<8 x i32> %val, i32* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_sext_v8i16_v8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf2 v12, v10 ; RV32-NEXT: vsll.vi v10, v12, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_v8i16_v8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf4 v12, v10 ; RV64-NEXT: vsll.vi v12, v12, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %eidxs = sext <8 x i16> %idxs to <8 x i32> @@ -634,19 +634,19 @@ define void @vpscatter_baseidx_zext_v8i16_v8i32(<8 x i32> %val, i32* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_zext_v8i16_v8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vzext.vf2 v12, v10 ; RV32-NEXT: vsll.vi 
v10, v12, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_zext_v8i16_v8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vzext.vf4 v12, v10 ; RV64-NEXT: vsll.vi v12, v12, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %eidxs = zext <8 x i16> %idxs to <8 x i32> @@ -658,18 +658,18 @@ define void @vpscatter_baseidx_v8i32(<8 x i32> %val, i32* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_v8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsll.vi v10, v10, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_v8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf2 v12, v10 ; RV64-NEXT: vsll.vi v12, v12, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %idxs @@ -682,13 +682,13 @@ define void @vpscatter_v2i64(<2 x i64> %val, <2 x i64*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.v2i64.v2p0i64(<2 x i64> %val, <2 x i64*> %ptrs, <2 x i1> %m, i32 %evl) @@ -700,13 +700,13 @@ define void @vpscatter_v4i64(<4 x i64> %val, <4 x i64*> %ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v4i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.v4i64.v4p0i64(<4 x i64> %val, <4 x i64*> %ptrs, <4 x i1> %m, i32 %evl) @@ -716,13 +716,13 @@ define void @vpscatter_truemask_v4i64(<4 x i64> %val, <4 x i64*> %ptrs, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_truemask_v4i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_truemask_v4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> poison, i1 1, i32 0 @@ -736,13 +736,13 @@ define void @vpscatter_v8i64(<8 x i64> %val, <8 x i64*> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v8i64: ; RV32: # %bb.0: -; RV32-NEXT: 
vsetvli zero, a0, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.v8i64.v8p0i64(<8 x i64> %val, <8 x i64*> %ptrs, <8 x i1> %m, i32 %evl) @@ -752,19 +752,19 @@ define void @vpscatter_baseidx_v8i8_v8i64(<8 x i64> %val, i64* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_v8i8_v8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v14, v12 ; RV32-NEXT: vsll.vi v12, v14, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_v8i8_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, <8 x i8> %idxs @@ -775,19 +775,19 @@ define void @vpscatter_baseidx_sext_v8i8_v8i64(<8 x i64> %val, i64* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_sext_v8i8_v8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v14, v12 ; RV32-NEXT: vsll.vi v12, v14, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_v8i8_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %eidxs = sext <8 x i8> %idxs to <8 x i64> @@ -799,19 +799,19 @@ define void @vpscatter_baseidx_zext_v8i8_v8i64(<8 x i64> %val, i64* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_zext_v8i8_v8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vzext.vf4 v14, v12 ; RV32-NEXT: vsll.vi v12, v14, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_zext_v8i8_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vzext.vf8 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i64> @@ -823,19 +823,19 @@ define void @vpscatter_baseidx_v8i16_v8i64(<8 x i64> %val, i64* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_v8i16_v8i64: ; RV32: # %bb.0: -; RV32-NEXT: 
vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf2 v14, v12 ; RV32-NEXT: vsll.vi v12, v14, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_v8i16_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf4 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, <8 x i16> %idxs @@ -846,19 +846,19 @@ define void @vpscatter_baseidx_sext_v8i16_v8i64(<8 x i64> %val, i64* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_sext_v8i16_v8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf2 v14, v12 ; RV32-NEXT: vsll.vi v12, v14, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_v8i16_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf4 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %eidxs = sext <8 x i16> %idxs to <8 x i64> @@ -870,19 +870,19 @@ define void @vpscatter_baseidx_zext_v8i16_v8i64(<8 x i64> %val, i64* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_zext_v8i16_v8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vzext.vf2 v14, v12 ; RV32-NEXT: vsll.vi v12, v14, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_zext_v8i16_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vzext.vf4 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %eidxs = zext <8 x i16> %idxs to <8 x i64> @@ -894,18 +894,18 @@ define void @vpscatter_baseidx_v8i32_v8i64(<8 x i64> %val, i64* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_v8i32_v8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsll.vi v12, v12, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_v8i32_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf2 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %ptrs 
= getelementptr inbounds i64, i64* %base, <8 x i32> %idxs @@ -916,18 +916,18 @@ define void @vpscatter_baseidx_sext_v8i32_v8i64(<8 x i64> %val, i64* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_sext_v8i32_v8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsll.vi v12, v12, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_v8i32_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf2 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %eidxs = sext <8 x i32> %idxs to <8 x i64> @@ -939,18 +939,18 @@ define void @vpscatter_baseidx_zext_v8i32_v8i64(<8 x i64> %val, i64* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_zext_v8i32_v8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsll.vi v12, v12, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_zext_v8i32_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vzext.vf2 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %eidxs = zext <8 x i32> %idxs to <8 x i64> @@ -962,18 +962,18 @@ define void @vpscatter_baseidx_v8i64(<8 x i64> %val, i64* %base, <8 x i64> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_v8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vnsrl.wi v16, v12, 0 ; RV32-NEXT: vsll.vi v12, v16, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_v8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsll.vi v12, v12, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %idxs @@ -986,13 +986,13 @@ define void @vpscatter_v2f16(<2 x half> %val, <2 x half*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v2f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v2f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.v2f16.v2p0f16(<2 x half> %val, <2 x half*> %ptrs, <2 x i1> %m, i32 %evl) @@ -1004,13 +1004,13 @@ define void @vpscatter_v4f16(<4 x half> %val, <4 x 
half*> %ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v4f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v4f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.v4f16.v4p0f16(<4 x half> %val, <4 x half*> %ptrs, <4 x i1> %m, i32 %evl) @@ -1020,13 +1020,13 @@ define void @vpscatter_truemask_v4f16(<4 x half> %val, <4 x half*> %ptrs, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_truemask_v4f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_truemask_v4f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> poison, i1 1, i32 0 @@ -1040,13 +1040,13 @@ define void @vpscatter_v8f16(<8 x half> %val, <8 x half*> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v8f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.v8f16.v8p0f16(<8 x half> %val, <8 x half*> %ptrs, <8 x i1> %m, i32 %evl) @@ -1056,19 +1056,19 @@ define void @vpscatter_baseidx_v8i8_v8f16(<8 x half> %val, half* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_v8i8_v8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v10, v9 ; RV32-NEXT: vadd.vv v10, v10, v10 -; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_v8i8_v8f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v12, v9 ; RV64-NEXT: vadd.vv v12, v12, v12 -; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds half, half* %base, <8 x i8> %idxs @@ -1079,19 +1079,19 @@ define void @vpscatter_baseidx_sext_v8i8_v8f16(<8 x half> %val, half* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_sext_v8i8_v8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v10, v9 ; RV32-NEXT: vadd.vv v10, v10, v10 -; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_v8i8_v8f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v12, v9 ; RV64-NEXT: vadd.vv v12, v12, v12 -; RV64-NEXT: 
vsetvli zero, a1, e16, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %eidxs = sext <8 x i8> %idxs to <8 x i16> @@ -1103,19 +1103,19 @@ define void @vpscatter_baseidx_zext_v8i8_v8f16(<8 x half> %val, half* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_zext_v8i8_v8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vzext.vf4 v10, v9 ; RV32-NEXT: vadd.vv v10, v10, v10 -; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_zext_v8i8_v8f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vzext.vf8 v12, v9 ; RV64-NEXT: vadd.vv v12, v12, v12 -; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i16> @@ -1127,19 +1127,19 @@ define void @vpscatter_baseidx_v8f16(<8 x half> %val, half* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_v8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf2 v10, v9 ; RV32-NEXT: vadd.vv v10, v10, v10 -; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_v8f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf4 v12, v9 ; RV64-NEXT: vadd.vv v12, v12, v12 -; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds half, half* %base, <8 x i16> %idxs @@ -1152,13 +1152,13 @@ define void @vpscatter_v2f32(<2 x float> %val, <2 x float*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v2f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v2f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.v2f32.v2p0f32(<2 x float> %val, <2 x float*> %ptrs, <2 x i1> %m, i32 %evl) @@ -1170,13 +1170,13 @@ define void @vpscatter_v4f32(<4 x float> %val, <4 x float*> %ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v4f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v4f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.v4f32.v4p0f32(<4 x float> %val, <4 x float*> %ptrs, <4 x i1> %m, i32 %evl) @@ -1186,13 +1186,13 @@ define void @vpscatter_truemask_v4f32(<4 x float> %val, <4 x float*> %ptrs, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_truemask_v4f32: ; 
RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_truemask_v4f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> poison, i1 1, i32 0 @@ -1206,13 +1206,13 @@ define void @vpscatter_v8f32(<8 x float> %val, <8 x float*> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.v8f32.v8p0f32(<8 x float> %val, <8 x float*> %ptrs, <8 x i1> %m, i32 %evl) @@ -1222,19 +1222,19 @@ define void @vpscatter_baseidx_v8i8_v8f32(<8 x float> %val, float* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_v8i8_v8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v12, v10 ; RV32-NEXT: vsll.vi v10, v12, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_v8i8_v8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v12, v10 ; RV64-NEXT: vsll.vi v12, v12, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, <8 x i8> %idxs @@ -1245,19 +1245,19 @@ define void @vpscatter_baseidx_sext_v8i8_v8f32(<8 x float> %val, float* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_sext_v8i8_v8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v12, v10 ; RV32-NEXT: vsll.vi v10, v12, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_v8i8_v8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v12, v10 ; RV64-NEXT: vsll.vi v12, v12, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %eidxs = sext <8 x i8> %idxs to <8 x i32> @@ -1269,19 +1269,19 @@ define void @vpscatter_baseidx_zext_v8i8_v8f32(<8 x float> %val, float* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_zext_v8i8_v8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vzext.vf4 v12, v10 ; RV32-NEXT: vsll.vi v10, v12, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; RV32-NEXT: ret ; ; 
RV64-LABEL: vpscatter_baseidx_zext_v8i8_v8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vzext.vf8 v12, v10 ; RV64-NEXT: vsll.vi v12, v12, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i32> @@ -1293,19 +1293,19 @@ define void @vpscatter_baseidx_v8i16_v8f32(<8 x float> %val, float* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_v8i16_v8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf2 v12, v10 ; RV32-NEXT: vsll.vi v10, v12, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_v8i16_v8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf4 v12, v10 ; RV64-NEXT: vsll.vi v12, v12, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, <8 x i16> %idxs @@ -1316,19 +1316,19 @@ define void @vpscatter_baseidx_sext_v8i16_v8f32(<8 x float> %val, float* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_sext_v8i16_v8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf2 v12, v10 ; RV32-NEXT: vsll.vi v10, v12, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_v8i16_v8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf4 v12, v10 ; RV64-NEXT: vsll.vi v12, v12, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %eidxs = sext <8 x i16> %idxs to <8 x i32> @@ -1340,19 +1340,19 @@ define void @vpscatter_baseidx_zext_v8i16_v8f32(<8 x float> %val, float* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_zext_v8i16_v8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vzext.vf2 v12, v10 ; RV32-NEXT: vsll.vi v10, v12, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_zext_v8i16_v8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vzext.vf4 v12, v10 ; RV64-NEXT: vsll.vi v12, v12, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %eidxs = zext <8 x i16> %idxs to <8 x i32> @@ -1364,18 +1364,18 @@ define void @vpscatter_baseidx_v8f32(<8 x float> %val, float* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_v8f32: ; RV32: # %bb.0: -; RV32-NEXT: 
vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsll.vi v10, v10, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_v8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf2 v12, v10 ; RV64-NEXT: vsll.vi v12, v12, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %idxs @@ -1388,13 +1388,13 @@ define void @vpscatter_v2f64(<2 x double> %val, <2 x double*> %ptrs, <2 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v2f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v2f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.v2f64.v2p0f64(<2 x double> %val, <2 x double*> %ptrs, <2 x i1> %m, i32 %evl) @@ -1406,13 +1406,13 @@ define void @vpscatter_v4f64(<4 x double> %val, <4 x double*> %ptrs, <4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v4f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v4f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.v4f64.v4p0f64(<4 x double> %val, <4 x double*> %ptrs, <4 x i1> %m, i32 %evl) @@ -1422,13 +1422,13 @@ define void @vpscatter_truemask_v4f64(<4 x double> %val, <4 x double*> %ptrs, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_truemask_v4f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_truemask_v4f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> poison, i1 1, i32 0 @@ -1442,13 +1442,13 @@ define void @vpscatter_v8f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_v8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_v8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.v8f64.v8p0f64(<8 x double> %val, <8 x double*> %ptrs, <8 x i1> %m, i32 %evl) @@ -1458,19 +1458,19 @@ define void @vpscatter_baseidx_v8i8_v8f64(<8 x double> %val, double* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_v8i8_v8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma 
; RV32-NEXT: vsext.vf4 v14, v12 ; RV32-NEXT: vsll.vi v12, v14, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_v8i8_v8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, <8 x i8> %idxs @@ -1481,19 +1481,19 @@ define void @vpscatter_baseidx_sext_v8i8_v8f64(<8 x double> %val, double* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_sext_v8i8_v8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf4 v14, v12 ; RV32-NEXT: vsll.vi v12, v14, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_v8i8_v8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf8 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %eidxs = sext <8 x i8> %idxs to <8 x i64> @@ -1505,19 +1505,19 @@ define void @vpscatter_baseidx_zext_v8i8_v8f64(<8 x double> %val, double* %base, <8 x i8> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_zext_v8i8_v8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vzext.vf4 v14, v12 ; RV32-NEXT: vsll.vi v12, v14, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_zext_v8i8_v8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vzext.vf8 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i64> @@ -1529,19 +1529,19 @@ define void @vpscatter_baseidx_v8i16_v8f64(<8 x double> %val, double* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_v8i16_v8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf2 v14, v12 ; RV32-NEXT: vsll.vi v12, v14, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_v8i16_v8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf4 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds 
double, double* %base, <8 x i16> %idxs @@ -1552,19 +1552,19 @@ define void @vpscatter_baseidx_sext_v8i16_v8f64(<8 x double> %val, double* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_sext_v8i16_v8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsext.vf2 v14, v12 ; RV32-NEXT: vsll.vi v12, v14, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_v8i16_v8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf4 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %eidxs = sext <8 x i16> %idxs to <8 x i64> @@ -1576,19 +1576,19 @@ define void @vpscatter_baseidx_zext_v8i16_v8f64(<8 x double> %val, double* %base, <8 x i16> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_zext_v8i16_v8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vzext.vf2 v14, v12 ; RV32-NEXT: vsll.vi v12, v14, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_zext_v8i16_v8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vzext.vf4 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %eidxs = zext <8 x i16> %idxs to <8 x i64> @@ -1600,18 +1600,18 @@ define void @vpscatter_baseidx_v8i32_v8f64(<8 x double> %val, double* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_v8i32_v8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsll.vi v12, v12, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_v8i32_v8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf2 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, <8 x i32> %idxs @@ -1622,18 +1622,18 @@ define void @vpscatter_baseidx_sext_v8i32_v8f64(<8 x double> %val, double* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_sext_v8i32_v8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsll.vi v12, v12, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_v8i32_v8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, 
mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsext.vf2 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %eidxs = sext <8 x i32> %idxs to <8 x i64> @@ -1645,18 +1645,18 @@ define void @vpscatter_baseidx_zext_v8i32_v8f64(<8 x double> %val, double* %base, <8 x i32> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_zext_v8i32_v8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vsll.vi v12, v12, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_zext_v8i32_v8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vzext.vf2 v16, v12 ; RV64-NEXT: vsll.vi v12, v16, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %eidxs = zext <8 x i32> %idxs to <8 x i64> @@ -1668,18 +1668,18 @@ define void @vpscatter_baseidx_v8f64(<8 x double> %val, double* %base, <8 x i64> %idxs, <8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_v8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32-NEXT: vnsrl.wi v16, v12, 0 ; RV32-NEXT: vsll.vi v12, v16, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_v8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsll.vi v12, v12, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %idxs @@ -1693,7 +1693,7 @@ ; RV32-LABEL: vpscatter_v32f64: ; RV32: # %bb.0: ; RV32-NEXT: li a2, 32 -; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; RV32-NEXT: vle32.v v24, (a0) ; RV32-NEXT: li a0, 16 ; RV32-NEXT: mv a2, a1 @@ -1702,18 +1702,18 @@ ; RV32-NEXT: li a2, 16 ; RV32-NEXT: .LBB79_2: ; RV32-NEXT: li a0, 0 -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: addi a2, a1, -16 ; RV32-NEXT: vsoxei32.v v8, (zero), v24, v0.t ; RV32-NEXT: bltu a1, a2, .LBB79_4 ; RV32-NEXT: # %bb.3: ; RV32-NEXT: mv a0, a2 ; RV32-NEXT: .LBB79_4: -; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; RV32-NEXT: vslidedown.vi v8, v24, 16 -; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vi v0, v0, 2 -; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v16, (zero), v8, v0.t ; RV32-NEXT: ret ; @@ -1724,7 +1724,7 @@ ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 4 ; RV64-NEXT: sub sp, sp, a1 -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vle64.v v24, (a0) ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 3 @@ 
-1742,7 +1742,7 @@ ; RV64-NEXT: .LBB79_2: ; RV64-NEXT: li a3, 0 ; RV64-NEXT: vle64.v v16, (a0) -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: addi a0, a2, -16 ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 3 @@ -1754,9 +1754,9 @@ ; RV64-NEXT: # %bb.3: ; RV64-NEXT: mv a3, a0 ; RV64-NEXT: .LBB79_4: -; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64-NEXT: vslidedown.vi v0, v0, 2 -; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; RV64-NEXT: addi a0, sp, 16 ; RV64-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload ; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t @@ -1773,7 +1773,7 @@ ; RV32-LABEL: vpscatter_baseidx_v32i32_v32f64: ; RV32: # %bb.0: ; RV32-NEXT: li a3, 32 -; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; RV32-NEXT: vle32.v v24, (a1) ; RV32-NEXT: li a1, 16 ; RV32-NEXT: vsll.vi v24, v24, 3 @@ -1783,18 +1783,18 @@ ; RV32-NEXT: li a3, 16 ; RV32-NEXT: .LBB80_2: ; RV32-NEXT: li a1, 0 -; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; RV32-NEXT: addi a3, a2, -16 ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: bltu a2, a3, .LBB80_4 ; RV32-NEXT: # %bb.3: ; RV32-NEXT: mv a1, a3 ; RV32-NEXT: .LBB80_4: -; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vi v0, v0, 2 -; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; RV32-NEXT: vslidedown.vi v8, v24, 16 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v16, (a0), v8, v0.t ; RV32-NEXT: ret ; @@ -1806,7 +1806,7 @@ ; RV64-NEXT: slli a3, a3, 4 ; RV64-NEXT: sub sp, sp, a3 ; RV64-NEXT: li a3, 32 -; RV64-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; RV64-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; RV64-NEXT: vle32.v v24, (a1) ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 3 @@ -1815,9 +1815,9 @@ ; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill ; RV64-NEXT: addi a1, sp, 16 ; RV64-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; RV64-NEXT: vslidedown.vi v16, v24, 16 -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vsext.vf2 v8, v16 ; RV64-NEXT: vsext.vf2 v16, v24 ; RV64-NEXT: li a3, 16 @@ -1829,7 +1829,7 @@ ; RV64-NEXT: .LBB80_2: ; RV64-NEXT: li a3, 0 ; RV64-NEXT: vsll.vi v16, v8, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: addi a1, a2, -16 ; RV64-NEXT: addi a4, sp, 16 ; RV64-NEXT: vl8re8.v v8, (a4) # Unknown-size Folded Reload @@ -1838,9 +1838,9 @@ ; RV64-NEXT: # %bb.3: ; RV64-NEXT: mv a3, a1 ; RV64-NEXT: .LBB80_4: -; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64-NEXT: vslidedown.vi v0, v0, 2 -; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 3 ; RV64-NEXT: add a1, sp, a1 @@ -1861,7 +1861,7 @@ ; RV32-LABEL: vpscatter_baseidx_sext_v32i32_v32f64: ; RV32: # %bb.0: ; RV32-NEXT: li a3, 32 -; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; RV32-NEXT: vle32.v v24, (a1) ; 
RV32-NEXT: li a1, 16 ; RV32-NEXT: vsll.vi v24, v24, 3 @@ -1871,18 +1871,18 @@ ; RV32-NEXT: li a3, 16 ; RV32-NEXT: .LBB81_2: ; RV32-NEXT: li a1, 0 -; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; RV32-NEXT: addi a3, a2, -16 ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: bltu a2, a3, .LBB81_4 ; RV32-NEXT: # %bb.3: ; RV32-NEXT: mv a1, a3 ; RV32-NEXT: .LBB81_4: -; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vi v0, v0, 2 -; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; RV32-NEXT: vslidedown.vi v8, v24, 16 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v16, (a0), v8, v0.t ; RV32-NEXT: ret ; @@ -1894,7 +1894,7 @@ ; RV64-NEXT: slli a3, a3, 4 ; RV64-NEXT: sub sp, sp, a3 ; RV64-NEXT: li a3, 32 -; RV64-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; RV64-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; RV64-NEXT: vle32.v v24, (a1) ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 3 @@ -1903,11 +1903,11 @@ ; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill ; RV64-NEXT: addi a1, sp, 16 ; RV64-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vsext.vf2 v16, v24 -; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; RV64-NEXT: vslidedown.vi v24, v24, 16 -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vsext.vf2 v8, v24 ; RV64-NEXT: li a3, 16 ; RV64-NEXT: vsll.vi v24, v16, 3 @@ -1918,7 +1918,7 @@ ; RV64-NEXT: .LBB81_2: ; RV64-NEXT: li a3, 0 ; RV64-NEXT: vsll.vi v16, v8, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: addi a1, a2, -16 ; RV64-NEXT: addi a4, sp, 16 ; RV64-NEXT: vl8re8.v v8, (a4) # Unknown-size Folded Reload @@ -1927,9 +1927,9 @@ ; RV64-NEXT: # %bb.3: ; RV64-NEXT: mv a3, a1 ; RV64-NEXT: .LBB81_4: -; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64-NEXT: vslidedown.vi v0, v0, 2 -; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 3 ; RV64-NEXT: add a1, sp, a1 @@ -1951,7 +1951,7 @@ ; RV32-LABEL: vpscatter_baseidx_zext_v32i32_v32f64: ; RV32: # %bb.0: ; RV32-NEXT: li a3, 32 -; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; RV32-NEXT: vle32.v v24, (a1) ; RV32-NEXT: li a1, 16 ; RV32-NEXT: vsll.vi v24, v24, 3 @@ -1961,18 +1961,18 @@ ; RV32-NEXT: li a3, 16 ; RV32-NEXT: .LBB82_2: ; RV32-NEXT: li a1, 0 -; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; RV32-NEXT: addi a3, a2, -16 ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: bltu a2, a3, .LBB82_4 ; RV32-NEXT: # %bb.3: ; RV32-NEXT: mv a1, a3 ; RV32-NEXT: .LBB82_4: -; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vi v0, v0, 2 -; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; RV32-NEXT: vslidedown.vi v8, v24, 16 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v16, (a0), v8, v0.t ; RV32-NEXT: 
ret ; @@ -1984,7 +1984,7 @@ ; RV64-NEXT: slli a3, a3, 4 ; RV64-NEXT: sub sp, sp, a3 ; RV64-NEXT: li a3, 32 -; RV64-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; RV64-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; RV64-NEXT: vle32.v v24, (a1) ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 3 @@ -1993,11 +1993,11 @@ ; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill ; RV64-NEXT: addi a1, sp, 16 ; RV64-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vzext.vf2 v16, v24 -; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; RV64-NEXT: vslidedown.vi v24, v24, 16 -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vzext.vf2 v8, v24 ; RV64-NEXT: li a3, 16 ; RV64-NEXT: vsll.vi v24, v16, 3 @@ -2008,7 +2008,7 @@ ; RV64-NEXT: .LBB82_2: ; RV64-NEXT: li a3, 0 ; RV64-NEXT: vsll.vi v16, v8, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: addi a1, a2, -16 ; RV64-NEXT: addi a4, sp, 16 ; RV64-NEXT: vl8re8.v v8, (a4) # Unknown-size Folded Reload @@ -2017,9 +2017,9 @@ ; RV64-NEXT: # %bb.3: ; RV64-NEXT: mv a3, a1 ; RV64-NEXT: .LBB82_4: -; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; RV64-NEXT: vslidedown.vi v0, v0, 2 -; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 3 ; RV64-NEXT: add a1, sp, a1 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll @@ -9,7 +9,7 @@ define void @vpstore_v2i8(<2 x i8> %val, <2 x i8>* %ptr, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vse8.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.v2i8.p0v2i8(<2 x i8> %val, <2 x i8>* %ptr, <2 x i1> %m, i32 %evl) @@ -21,7 +21,7 @@ define void @vpstore_v4i8(<4 x i8> %val, <4 x i8>* %ptr, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vse8.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.v4i8.p0v4i8(<4 x i8> %val, <4 x i8>* %ptr, <4 x i1> %m, i32 %evl) @@ -33,7 +33,7 @@ define void @vpstore_v8i8(<8 x i8> %val, <8 x i8>* %ptr, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vse8.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.v8i8.p0v8i8(<8 x i8> %val, <8 x i8>* %ptr, <8 x i1> %m, i32 %evl) @@ -45,7 +45,7 @@ define void @vpstore_v2i16(<2 x i16> %val, <2 x i16>* %ptr, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.v2i16.p0v2i16(<2 x i16> %val, <2 x i16>* %ptr, <2 x i1> %m, i32 %evl) @@ -57,7 +57,7 @@ define void @vpstore_v4i16(<4 x i16> %val, <4 x i16>* %ptr, <4 x i1> 
%m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.v4i16.p0v4i16(<4 x i16> %val, <4 x i16>* %ptr, <4 x i1> %m, i32 %evl) @@ -69,7 +69,7 @@ define void @vpstore_v8i16(<8 x i16> %val, <8 x i16>* %ptr, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.v8i16.p0v8i16(<8 x i16> %val, <8 x i16>* %ptr, <8 x i1> %m, i32 %evl) @@ -81,7 +81,7 @@ define void @vpstore_v2i32(<2 x i32> %val, <2 x i32>* %ptr, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.v2i32.p0v2i32(<2 x i32> %val, <2 x i32>* %ptr, <2 x i1> %m, i32 %evl) @@ -93,7 +93,7 @@ define void @vpstore_v4i32(<4 x i32> %val, <4 x i32>* %ptr, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.v4i32.p0v4i32(<4 x i32> %val, <4 x i32>* %ptr, <4 x i1> %m, i32 %evl) @@ -105,7 +105,7 @@ define void @vpstore_v8i32(<8 x i32> %val, <8 x i32>* %ptr, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.v8i32.p0v8i32(<8 x i32> %val, <8 x i32>* %ptr, <8 x i1> %m, i32 %evl) @@ -117,7 +117,7 @@ define void @vpstore_v2i64(<2 x i64> %val, <2 x i64>* %ptr, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vse64.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.v2i64.p0v2i64(<2 x i64> %val, <2 x i64>* %ptr, <2 x i1> %m, i32 %evl) @@ -129,7 +129,7 @@ define void @vpstore_v4i64(<4 x i64> %val, <4 x i64>* %ptr, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vse64.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.v4i64.p0v4i64(<4 x i64> %val, <4 x i64>* %ptr, <4 x i1> %m, i32 %evl) @@ -141,7 +141,7 @@ define void @vpstore_v8i64(<8 x i64> %val, <8 x i64>* %ptr, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vse64.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.v8i64.p0v8i64(<8 x i64> %val, <8 x i64>* %ptr, <8 x i1> %m, i32 %evl) @@ -153,7 +153,7 @@ define void @vpstore_v2f16(<2 x half> %val, <2 x half>* %ptr, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.v2f16.p0v2f16(<2 x half> %val, 
<2 x half>* %ptr, <2 x i1> %m, i32 %evl) @@ -165,7 +165,7 @@ define void @vpstore_v4f16(<4 x half> %val, <4 x half>* %ptr, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.v4f16.p0v4f16(<4 x half> %val, <4 x half>* %ptr, <4 x i1> %m, i32 %evl) @@ -177,7 +177,7 @@ define void @vpstore_v8f16(<8 x half> %val, <8 x half>* %ptr, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.v8f16.p0v8f16(<8 x half> %val, <8 x half>* %ptr, <8 x i1> %m, i32 %evl) @@ -189,7 +189,7 @@ define void @vpstore_v2f32(<2 x float> %val, <2 x float>* %ptr, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.v2f32.p0v2f32(<2 x float> %val, <2 x float>* %ptr, <2 x i1> %m, i32 %evl) @@ -201,7 +201,7 @@ define void @vpstore_v4f32(<4 x float> %val, <4 x float>* %ptr, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.v4f32.p0v4f32(<4 x float> %val, <4 x float>* %ptr, <4 x i1> %m, i32 %evl) @@ -213,7 +213,7 @@ define void @vpstore_v6f32(<6 x float> %val, <6 x float>* %ptr, <6 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v6f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.v6f32.p0v6f32(<6 x float> %val, <6 x float>* %ptr, <6 x i1> %m, i32 %evl) @@ -225,7 +225,7 @@ define void @vpstore_v8f32(<8 x float> %val, <8 x float>* %ptr, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.v8f32.p0v8f32(<8 x float> %val, <8 x float>* %ptr, <8 x i1> %m, i32 %evl) @@ -237,7 +237,7 @@ define void @vpstore_v2f64(<2 x double> %val, <2 x double>* %ptr, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vse64.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.v2f64.p0v2f64(<2 x double> %val, <2 x double>* %ptr, <2 x i1> %m, i32 %evl) @@ -249,7 +249,7 @@ define void @vpstore_v4f64(<4 x double> %val, <4 x double>* %ptr, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vse64.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.v4f64.p0v4f64(<4 x double> %val, <4 x double>* %ptr, <4 x i1> %m, i32 %evl) @@ -261,7 +261,7 @@ define void @vpstore_v8f64(<8 x double> %val, <8 x double>* %ptr, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v8f64: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vse64.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.v8f64.p0v8f64(<8 x double> %val, <8 x double>* %ptr, <8 x i1> %m, i32 %evl) @@ -271,7 +271,7 @@ define void @vpstore_v2i8_allones_mask(<2 x i8> %val, <2 x i8>* %ptr, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_v2i8_allones_mask: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret %a = insertelement <2 x i1> poison, i1 true, i32 0 @@ -292,17 +292,17 @@ ; CHECK-NEXT: li a3, 16 ; CHECK-NEXT: .LBB23_2: ; CHECK-NEXT: li a2, 0 -; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; CHECK-NEXT: addi a3, a1, -16 ; CHECK-NEXT: vse64.v v8, (a0), v0.t ; CHECK-NEXT: bltu a1, a3, .LBB23_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: mv a2, a3 ; CHECK-NEXT: .LBB23_4: -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v0, v0, 2 ; CHECK-NEXT: addi a0, a0, 128 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vse64.v v16, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.v32f64.p0v32f64(<32 x double> %val, <32 x double>* %ptr, <32 x i1> %m, i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll @@ -9,7 +9,7 @@ define signext i1 @vreduce_or_v1i1(<1 x i1> %v) { ; CHECK-LABEL: vreduce_or_v1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -25,7 +25,7 @@ define signext i1 @vreduce_xor_v1i1(<1 x i1> %v) { ; CHECK-LABEL: vreduce_xor_v1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -41,7 +41,7 @@ define signext i1 @vreduce_and_v1i1(<1 x i1> %v) { ; CHECK-LABEL: vreduce_and_v1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -57,7 +57,7 @@ define signext i1 @vreduce_umax_v1i1(<1 x i1> %v) { ; CHECK-LABEL: vreduce_umax_v1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -73,7 +73,7 @@ define signext i1 @vreduce_smax_v1i1(<1 x i1> %v) { ; CHECK-LABEL: vreduce_smax_v1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -89,7 +89,7 @@ define signext i1 @vreduce_umin_v1i1(<1 x i1> %v) { ; CHECK-LABEL: vreduce_umin_v1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; 
CHECK-NEXT: vmv.x.s a0, v8 @@ -105,7 +105,7 @@ define signext i1 @vreduce_smin_v1i1(<1 x i1> %v) { ; CHECK-LABEL: vreduce_smin_v1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -121,7 +121,7 @@ define signext i1 @vreduce_or_v2i1(<2 x i1> %v) { ; CHECK-LABEL: vreduce_or_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: addi a0, a0, -1 @@ -135,7 +135,7 @@ define signext i1 @vreduce_xor_v2i1(<2 x i1> %v) { ; CHECK-LABEL: vreduce_xor_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 @@ -149,7 +149,7 @@ define signext i1 @vreduce_and_v2i1(<2 x i1> %v) { ; CHECK-LABEL: vreduce_and_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: snez a0, a0 @@ -164,7 +164,7 @@ define signext i1 @vreduce_umax_v2i1(<2 x i1> %v) { ; CHECK-LABEL: vreduce_umax_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: addi a0, a0, -1 @@ -178,7 +178,7 @@ define signext i1 @vreduce_smax_v2i1(<2 x i1> %v) { ; CHECK-LABEL: vreduce_smax_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: snez a0, a0 @@ -193,7 +193,7 @@ define signext i1 @vreduce_umin_v2i1(<2 x i1> %v) { ; CHECK-LABEL: vreduce_umin_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: snez a0, a0 @@ -208,7 +208,7 @@ define signext i1 @vreduce_smin_v2i1(<2 x i1> %v) { ; CHECK-LABEL: vreduce_smin_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: addi a0, a0, -1 @@ -222,7 +222,7 @@ define signext i1 @vreduce_or_v4i1(<4 x i1> %v) { ; CHECK-LABEL: vreduce_or_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: addi a0, a0, -1 @@ -236,7 +236,7 @@ define signext i1 @vreduce_xor_v4i1(<4 x i1> %v) { ; CHECK-LABEL: vreduce_xor_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 @@ -250,7 +250,7 @@ define signext i1 @vreduce_and_v4i1(<4 x i1> %v) { ; CHECK-LABEL: vreduce_and_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: snez a0, a0 @@ -265,7 +265,7 @@ define signext i1 @vreduce_umax_v4i1(<4 x i1> %v) { ; CHECK-LABEL: vreduce_umax_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, 
mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: addi a0, a0, -1 @@ -279,7 +279,7 @@ define signext i1 @vreduce_smax_v4i1(<4 x i1> %v) { ; CHECK-LABEL: vreduce_smax_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: snez a0, a0 @@ -294,7 +294,7 @@ define signext i1 @vreduce_umin_v4i1(<4 x i1> %v) { ; CHECK-LABEL: vreduce_umin_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: snez a0, a0 @@ -309,7 +309,7 @@ define signext i1 @vreduce_smin_v4i1(<4 x i1> %v) { ; CHECK-LABEL: vreduce_smin_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: addi a0, a0, -1 @@ -323,7 +323,7 @@ define signext i1 @vreduce_or_v8i1(<8 x i1> %v) { ; CHECK-LABEL: vreduce_or_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: addi a0, a0, -1 @@ -337,7 +337,7 @@ define signext i1 @vreduce_xor_v8i1(<8 x i1> %v) { ; CHECK-LABEL: vreduce_xor_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 @@ -351,7 +351,7 @@ define signext i1 @vreduce_and_v8i1(<8 x i1> %v) { ; CHECK-LABEL: vreduce_and_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: snez a0, a0 @@ -366,7 +366,7 @@ define signext i1 @vreduce_umax_v8i1(<8 x i1> %v) { ; CHECK-LABEL: vreduce_umax_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: addi a0, a0, -1 @@ -380,7 +380,7 @@ define signext i1 @vreduce_smax_v8i1(<8 x i1> %v) { ; CHECK-LABEL: vreduce_smax_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: snez a0, a0 @@ -395,7 +395,7 @@ define signext i1 @vreduce_umin_v8i1(<8 x i1> %v) { ; CHECK-LABEL: vreduce_umin_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: snez a0, a0 @@ -410,7 +410,7 @@ define signext i1 @vreduce_smin_v8i1(<8 x i1> %v) { ; CHECK-LABEL: vreduce_smin_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: addi a0, a0, -1 @@ -424,7 +424,7 @@ define signext i1 @vreduce_or_v16i1(<16 x i1> %v) { ; CHECK-LABEL: vreduce_or_v16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: addi a0, a0, -1 @@ -438,7 +438,7 @@ define signext i1 
@vreduce_xor_v16i1(<16 x i1> %v) { ; CHECK-LABEL: vreduce_xor_v16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 @@ -452,7 +452,7 @@ define signext i1 @vreduce_and_v16i1(<16 x i1> %v) { ; CHECK-LABEL: vreduce_and_v16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: snez a0, a0 @@ -467,7 +467,7 @@ define signext i1 @vreduce_umax_v16i1(<16 x i1> %v) { ; CHECK-LABEL: vreduce_umax_v16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: addi a0, a0, -1 @@ -481,7 +481,7 @@ define signext i1 @vreduce_smax_v16i1(<16 x i1> %v) { ; CHECK-LABEL: vreduce_smax_v16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: snez a0, a0 @@ -496,7 +496,7 @@ define signext i1 @vreduce_umin_v16i1(<16 x i1> %v) { ; CHECK-LABEL: vreduce_umin_v16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: snez a0, a0 @@ -511,7 +511,7 @@ define signext i1 @vreduce_smin_v16i1(<16 x i1> %v) { ; CHECK-LABEL: vreduce_smin_v16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: addi a0, a0, -1 @@ -525,7 +525,7 @@ define signext i1 @vreduce_or_v32i1(<32 x i1> %v) { ; LMULMAX1-LABEL: vreduce_or_v32i1: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: vmor.mm v8, v0, v8 ; LMULMAX1-NEXT: vcpop.m a0, v8 ; LMULMAX1-NEXT: seqz a0, a0 @@ -535,7 +535,7 @@ ; LMULMAX8-LABEL: vreduce_or_v32i1: ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: li a0, 32 -; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; LMULMAX8-NEXT: vcpop.m a0, v0 ; LMULMAX8-NEXT: seqz a0, a0 ; LMULMAX8-NEXT: addi a0, a0, -1 @@ -549,7 +549,7 @@ define signext i1 @vreduce_xor_v32i1(<32 x i1> %v) { ; LMULMAX1-LABEL: vreduce_xor_v32i1: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: vmxor.mm v8, v0, v8 ; LMULMAX1-NEXT: vcpop.m a0, v8 ; LMULMAX1-NEXT: andi a0, a0, 1 @@ -559,7 +559,7 @@ ; LMULMAX8-LABEL: vreduce_xor_v32i1: ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: li a0, 32 -; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; LMULMAX8-NEXT: vcpop.m a0, v0 ; LMULMAX8-NEXT: andi a0, a0, 1 ; LMULMAX8-NEXT: neg a0, a0 @@ -573,7 +573,7 @@ define signext i1 @vreduce_and_v32i1(<32 x i1> %v) { ; LMULMAX1-LABEL: vreduce_and_v32i1: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: vmnand.mm v8, v0, v8 ; LMULMAX1-NEXT: vcpop.m a0, v8 ; LMULMAX1-NEXT: snez a0, a0 @@ -583,7 +583,7 @@ ; LMULMAX8-LABEL: vreduce_and_v32i1: ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: li a0, 32 -; LMULMAX8-NEXT: vsetvli zero, a0, 
e8, m2, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; LMULMAX8-NEXT: vmnot.m v8, v0 ; LMULMAX8-NEXT: vcpop.m a0, v8 ; LMULMAX8-NEXT: snez a0, a0 @@ -598,7 +598,7 @@ define signext i1 @vreduce_umax_v32i1(<32 x i1> %v) { ; LMULMAX1-LABEL: vreduce_umax_v32i1: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: vmor.mm v8, v0, v8 ; LMULMAX1-NEXT: vcpop.m a0, v8 ; LMULMAX1-NEXT: seqz a0, a0 @@ -608,7 +608,7 @@ ; LMULMAX8-LABEL: vreduce_umax_v32i1: ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: li a0, 32 -; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; LMULMAX8-NEXT: vcpop.m a0, v0 ; LMULMAX8-NEXT: seqz a0, a0 ; LMULMAX8-NEXT: addi a0, a0, -1 @@ -622,7 +622,7 @@ define signext i1 @vreduce_smax_v32i1(<32 x i1> %v) { ; LMULMAX1-LABEL: vreduce_smax_v32i1: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: vmnand.mm v8, v0, v8 ; LMULMAX1-NEXT: vcpop.m a0, v8 ; LMULMAX1-NEXT: snez a0, a0 @@ -632,7 +632,7 @@ ; LMULMAX8-LABEL: vreduce_smax_v32i1: ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: li a0, 32 -; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; LMULMAX8-NEXT: vmnot.m v8, v0 ; LMULMAX8-NEXT: vcpop.m a0, v8 ; LMULMAX8-NEXT: snez a0, a0 @@ -647,7 +647,7 @@ define signext i1 @vreduce_umin_v32i1(<32 x i1> %v) { ; LMULMAX1-LABEL: vreduce_umin_v32i1: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: vmnand.mm v8, v0, v8 ; LMULMAX1-NEXT: vcpop.m a0, v8 ; LMULMAX1-NEXT: snez a0, a0 @@ -657,7 +657,7 @@ ; LMULMAX8-LABEL: vreduce_umin_v32i1: ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: li a0, 32 -; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; LMULMAX8-NEXT: vmnot.m v8, v0 ; LMULMAX8-NEXT: vcpop.m a0, v8 ; LMULMAX8-NEXT: snez a0, a0 @@ -672,7 +672,7 @@ define signext i1 @vreduce_smin_v32i1(<32 x i1> %v) { ; LMULMAX1-LABEL: vreduce_smin_v32i1: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: vmor.mm v8, v0, v8 ; LMULMAX1-NEXT: vcpop.m a0, v8 ; LMULMAX1-NEXT: seqz a0, a0 @@ -682,7 +682,7 @@ ; LMULMAX8-LABEL: vreduce_smin_v32i1: ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: li a0, 32 -; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; LMULMAX8-NEXT: vcpop.m a0, v0 ; LMULMAX8-NEXT: seqz a0, a0 ; LMULMAX8-NEXT: addi a0, a0, -1 @@ -696,7 +696,7 @@ define signext i1 @vreduce_or_v64i1(<64 x i1> %v) { ; LMULMAX1-LABEL: vreduce_or_v64i1: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: vmor.mm v8, v8, v10 ; LMULMAX1-NEXT: vmor.mm v9, v0, v9 ; LMULMAX1-NEXT: vmor.mm v8, v9, v8 @@ -708,7 +708,7 @@ ; LMULMAX8-LABEL: vreduce_or_v64i1: ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: li a0, 64 -; LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; LMULMAX8-NEXT: vcpop.m a0, v0 ; LMULMAX8-NEXT: seqz a0, a0 ; LMULMAX8-NEXT: addi a0, a0, -1 @@ -722,7 +722,7 @@ define signext i1 @vreduce_xor_v64i1(<64 x i1> %v) { ; LMULMAX1-LABEL: vreduce_xor_v64i1: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: 
vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: vmxor.mm v8, v8, v10 ; LMULMAX1-NEXT: vmxor.mm v9, v0, v9 ; LMULMAX1-NEXT: vmxor.mm v8, v9, v8 @@ -734,7 +734,7 @@ ; LMULMAX8-LABEL: vreduce_xor_v64i1: ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: li a0, 64 -; LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; LMULMAX8-NEXT: vcpop.m a0, v0 ; LMULMAX8-NEXT: andi a0, a0, 1 ; LMULMAX8-NEXT: neg a0, a0 @@ -748,7 +748,7 @@ define signext i1 @vreduce_and_v64i1(<64 x i1> %v) { ; LMULMAX1-LABEL: vreduce_and_v64i1: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: vmand.mm v8, v8, v10 ; LMULMAX1-NEXT: vmand.mm v9, v0, v9 ; LMULMAX1-NEXT: vmnand.mm v8, v9, v8 @@ -760,7 +760,7 @@ ; LMULMAX8-LABEL: vreduce_and_v64i1: ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: li a0, 64 -; LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; LMULMAX8-NEXT: vmnot.m v8, v0 ; LMULMAX8-NEXT: vcpop.m a0, v8 ; LMULMAX8-NEXT: snez a0, a0 @@ -775,7 +775,7 @@ define signext i1 @vreduce_umax_v64i1(<64 x i1> %v) { ; LMULMAX1-LABEL: vreduce_umax_v64i1: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: vmor.mm v8, v8, v10 ; LMULMAX1-NEXT: vmor.mm v9, v0, v9 ; LMULMAX1-NEXT: vmor.mm v8, v9, v8 @@ -787,7 +787,7 @@ ; LMULMAX8-LABEL: vreduce_umax_v64i1: ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: li a0, 64 -; LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; LMULMAX8-NEXT: vcpop.m a0, v0 ; LMULMAX8-NEXT: seqz a0, a0 ; LMULMAX8-NEXT: addi a0, a0, -1 @@ -801,7 +801,7 @@ define signext i1 @vreduce_smax_v64i1(<64 x i1> %v) { ; LMULMAX1-LABEL: vreduce_smax_v64i1: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: vmand.mm v8, v8, v10 ; LMULMAX1-NEXT: vmand.mm v9, v0, v9 ; LMULMAX1-NEXT: vmnand.mm v8, v9, v8 @@ -813,7 +813,7 @@ ; LMULMAX8-LABEL: vreduce_smax_v64i1: ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: li a0, 64 -; LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; LMULMAX8-NEXT: vmnot.m v8, v0 ; LMULMAX8-NEXT: vcpop.m a0, v8 ; LMULMAX8-NEXT: snez a0, a0 @@ -828,7 +828,7 @@ define signext i1 @vreduce_umin_v64i1(<64 x i1> %v) { ; LMULMAX1-LABEL: vreduce_umin_v64i1: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: vmand.mm v8, v8, v10 ; LMULMAX1-NEXT: vmand.mm v9, v0, v9 ; LMULMAX1-NEXT: vmnand.mm v8, v9, v8 @@ -840,7 +840,7 @@ ; LMULMAX8-LABEL: vreduce_umin_v64i1: ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: li a0, 64 -; LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; LMULMAX8-NEXT: vmnot.m v8, v0 ; LMULMAX8-NEXT: vcpop.m a0, v8 ; LMULMAX8-NEXT: snez a0, a0 @@ -855,7 +855,7 @@ define signext i1 @vreduce_smin_v64i1(<64 x i1> %v) { ; LMULMAX1-LABEL: vreduce_smin_v64i1: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: vmor.mm v8, v8, v10 ; LMULMAX1-NEXT: vmor.mm v9, v0, v9 ; LMULMAX1-NEXT: vmor.mm v8, v9, v8 @@ -867,7 +867,7 @@ ; LMULMAX8-LABEL: vreduce_smin_v64i1: ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: li a0, 64 -; LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; 
LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; LMULMAX8-NEXT: vcpop.m a0, v0 ; LMULMAX8-NEXT: seqz a0, a0 ; LMULMAX8-NEXT: addi a0, a0, -1 @@ -881,7 +881,7 @@ define signext i1 @vreduce_add_v1i1(<1 x i1> %v) { ; CHECK-LABEL: vreduce_add_v1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -897,7 +897,7 @@ define signext i1 @vreduce_add_v2i1(<2 x i1> %v) { ; CHECK-LABEL: vreduce_add_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 @@ -911,7 +911,7 @@ define signext i1 @vreduce_add_v4i1(<4 x i1> %v) { ; CHECK-LABEL: vreduce_add_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 @@ -925,7 +925,7 @@ define signext i1 @vreduce_add_v8i1(<8 x i1> %v) { ; CHECK-LABEL: vreduce_add_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 @@ -939,7 +939,7 @@ define signext i1 @vreduce_add_v16i1(<16 x i1> %v) { ; CHECK-LABEL: vreduce_add_v16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 @@ -953,7 +953,7 @@ define signext i1 @vreduce_add_v32i1(<32 x i1> %v) { ; LMULMAX1-LABEL: vreduce_add_v32i1: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: vmxor.mm v8, v0, v8 ; LMULMAX1-NEXT: vcpop.m a0, v8 ; LMULMAX1-NEXT: andi a0, a0, 1 @@ -963,7 +963,7 @@ ; LMULMAX8-LABEL: vreduce_add_v32i1: ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: li a0, 32 -; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; LMULMAX8-NEXT: vcpop.m a0, v0 ; LMULMAX8-NEXT: andi a0, a0, 1 ; LMULMAX8-NEXT: neg a0, a0 @@ -977,7 +977,7 @@ define signext i1 @vreduce_add_v64i1(<64 x i1> %v) { ; LMULMAX1-LABEL: vreduce_add_v64i1: ; LMULMAX1: # %bb.0: -; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; LMULMAX1-NEXT: vmxor.mm v8, v8, v10 ; LMULMAX1-NEXT: vmxor.mm v9, v0, v9 ; LMULMAX1-NEXT: vmxor.mm v8, v9, v8 @@ -989,7 +989,7 @@ ; LMULMAX8-LABEL: vreduce_add_v64i1: ; LMULMAX8: # %bb.0: ; LMULMAX8-NEXT: li a0, 64 -; LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; LMULMAX8-NEXT: vcpop.m a0, v0 ; LMULMAX8-NEXT: andi a0, a0, 1 ; LMULMAX8-NEXT: neg a0, a0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll @@ -9,7 +9,7 @@ define <8 x i7> @vrem_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v8i7: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vadd.vv v9, v9, v9 ; CHECK-NEXT: vsra.vi v9, v9, 1 ; CHECK-NEXT: vadd.vv v8, v8, v8 @@ -36,7 
+36,7 @@ define <2 x i8> @vrem_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -60,7 +60,7 @@ define <2 x i8> @vrem_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_v2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0 @@ -86,7 +86,7 @@ define <4 x i8> @vrem_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -110,7 +110,7 @@ define <4 x i8> @vrem_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0 @@ -148,7 +148,7 @@ define <8 x i8> @vrem_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -172,7 +172,7 @@ define <8 x i8> @vrem_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_v8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 @@ -198,7 +198,7 @@ define <16 x i8> @vrem_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -222,7 +222,7 @@ define <16 x i8> @vrem_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_v16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0 @@ -248,7 +248,7 @@ define <2 x i16> @vrem_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -272,7 +272,7 @@ define <2 x i16> @vrem_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_v2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vrem.vx v8, 
v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0 @@ -298,7 +298,7 @@ define <4 x i16> @vrem_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -322,7 +322,7 @@ define <4 x i16> @vrem_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0 @@ -348,7 +348,7 @@ define <8 x i16> @vrem_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -372,7 +372,7 @@ define <8 x i16> @vrem_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_v8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0 @@ -398,7 +398,7 @@ define <16 x i16> @vrem_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -422,7 +422,7 @@ define <16 x i16> @vrem_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_v16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0 @@ -448,7 +448,7 @@ define <2 x i32> @vrem_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -472,7 +472,7 @@ define <2 x i32> @vrem_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_v2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0 @@ -498,7 +498,7 @@ define <4 x i32> @vrem_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -522,7 +522,7 @@ define <4 x i32> @vrem_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: 
vrem_vx_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0 @@ -548,7 +548,7 @@ define <8 x i32> @vrem_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -572,7 +572,7 @@ define <8 x i32> @vrem_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_v8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 @@ -598,7 +598,7 @@ define <16 x i32> @vrem_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -622,7 +622,7 @@ define <16 x i32> @vrem_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_v16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0 @@ -648,7 +648,7 @@ define <2 x i64> @vrem_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -665,7 +665,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v9, v0.t @@ -691,16 +691,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vrem.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vx_v2i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vrem.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0 @@ -726,7 +726,7 @@ define <4 x i64> @vrem_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -743,7 +743,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: 
addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v10, v0.t @@ -769,16 +769,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vrem.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vx_v4i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vrem.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0 @@ -804,7 +804,7 @@ define <8 x i64> @vrem_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -821,7 +821,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v12, v0.t @@ -847,16 +847,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vrem.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vx_v8i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vrem.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 @@ -882,7 +882,7 @@ define <16 x i64> @vrem_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_v16i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -899,7 +899,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v16, v0.t @@ -925,16 +925,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vrem.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vx_v16i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: 
vrem.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll @@ -10,7 +10,7 @@ ; CHECK-LABEL: vremu_vv_v8i7: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 127 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vand.vx v9, v9, a1 ; CHECK-NEXT: vand.vx v8, v8, a1 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -35,7 +35,7 @@ define <2 x i8> @vremu_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -59,7 +59,7 @@ define <2 x i8> @vremu_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_v2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0 @@ -85,7 +85,7 @@ define <4 x i8> @vremu_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -109,7 +109,7 @@ define <4 x i8> @vremu_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0 @@ -147,7 +147,7 @@ define <8 x i8> @vremu_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -171,7 +171,7 @@ define <8 x i8> @vremu_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_v8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 @@ -197,7 +197,7 @@ define <16 x i8> @vremu_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -221,7 +221,7 @@ define <16 x i8> @vremu_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_v16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> 
poison, i8 %b, i32 0 @@ -247,7 +247,7 @@ define <2 x i16> @vremu_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -271,7 +271,7 @@ define <2 x i16> @vremu_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_v2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0 @@ -297,7 +297,7 @@ define <4 x i16> @vremu_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -321,7 +321,7 @@ define <4 x i16> @vremu_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0 @@ -347,7 +347,7 @@ define <8 x i16> @vremu_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -371,7 +371,7 @@ define <8 x i16> @vremu_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_v8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0 @@ -397,7 +397,7 @@ define <16 x i16> @vremu_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -421,7 +421,7 @@ define <16 x i16> @vremu_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_v16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0 @@ -447,7 +447,7 @@ define <2 x i32> @vremu_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -471,7 +471,7 @@ define <2 x i32> @vremu_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_v2i32_unmasked: ; CHECK: # %bb.0: 
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0 @@ -497,7 +497,7 @@ define <4 x i32> @vremu_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -521,7 +521,7 @@ define <4 x i32> @vremu_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0 @@ -547,7 +547,7 @@ define <8 x i32> @vremu_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -571,7 +571,7 @@ define <8 x i32> @vremu_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_v8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 @@ -597,7 +597,7 @@ define <16 x i32> @vremu_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -621,7 +621,7 @@ define <16 x i32> @vremu_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_v16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0 @@ -647,7 +647,7 @@ define <2 x i64> @vremu_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -664,7 +664,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vremu.vv v8, v8, v9, v0.t @@ -690,16 +690,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vremu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: 
ret ; ; RV64-LABEL: vremu_vx_v2i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vremu.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0 @@ -725,7 +725,7 @@ define <4 x i64> @vremu_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -742,7 +742,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vremu.vv v8, v8, v10, v0.t @@ -768,16 +768,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vremu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vx_v4i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vremu.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0 @@ -803,7 +803,7 @@ define <8 x i64> @vremu_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -820,7 +820,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vremu.vv v8, v8, v12, v0.t @@ -846,16 +846,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vremu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vx_v8i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vremu.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 @@ -881,7 +881,7 @@ define <16 x i64> @vremu_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_v16i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -898,7 +898,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: 
vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vremu.vv v8, v8, v16, v0.t @@ -924,16 +924,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vremu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vx_v16i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vremu.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrsub-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrsub-vp.ll @@ -21,7 +21,7 @@ define <2 x i8> @vrsub_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0 @@ -47,7 +47,7 @@ define <2 x i8> @vrsub_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_v2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 2, i32 0 @@ -75,7 +75,7 @@ define <4 x i8> @vrsub_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0 @@ -101,7 +101,7 @@ define <4 x i8> @vrsub_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 2, i32 0 @@ -129,7 +129,7 @@ define <8 x i8> @vrsub_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 @@ -155,7 +155,7 @@ define <8 x i8> @vrsub_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_v8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 2, i32 0 @@ -183,7 +183,7 @@ define <16 x i8> @vrsub_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = 
insertelement <16 x i8> poison, i8 %b, i32 0 @@ -209,7 +209,7 @@ define <16 x i8> @vrsub_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_v16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 2, i32 0 @@ -237,7 +237,7 @@ define <2 x i16> @vrsub_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0 @@ -263,7 +263,7 @@ define <2 x i16> @vrsub_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_v2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 2, i32 0 @@ -291,7 +291,7 @@ define <4 x i16> @vrsub_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0 @@ -317,7 +317,7 @@ define <4 x i16> @vrsub_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 2, i32 0 @@ -345,7 +345,7 @@ define <8 x i16> @vrsub_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0 @@ -371,7 +371,7 @@ define <8 x i16> @vrsub_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_v8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 2, i32 0 @@ -399,7 +399,7 @@ define <16 x i16> @vrsub_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0 @@ -425,7 +425,7 @@ define <16 x i16> @vrsub_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_v16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 2, i32 0 @@ -453,7 +453,7 @@ define <2 x i32> @vrsub_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, 
ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0 @@ -479,7 +479,7 @@ define <2 x i32> @vrsub_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_v2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 2, i32 0 @@ -507,7 +507,7 @@ define <4 x i32> @vrsub_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0 @@ -533,7 +533,7 @@ define <4 x i32> @vrsub_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 2, i32 0 @@ -561,7 +561,7 @@ define <8 x i32> @vrsub_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 @@ -587,7 +587,7 @@ define <8 x i32> @vrsub_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_v8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 2, i32 0 @@ -615,7 +615,7 @@ define <16 x i32> @vrsub_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_v16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0 @@ -641,7 +641,7 @@ define <16 x i32> @vrsub_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_v16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 2, i32 0 @@ -662,7 +662,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vsub.vv v8, v9, v8, v0.t @@ -688,16 +688,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vsub.vv v8, v9, v8 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vrsub_vx_v2i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, 
e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vrsub.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0 @@ -723,7 +723,7 @@ define <2 x i64> @vrsub_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_v2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 2, i32 0 @@ -744,7 +744,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vsub.vv v8, v10, v8, v0.t @@ -770,16 +770,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vsub.vv v8, v10, v8 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vrsub_vx_v4i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vrsub.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0 @@ -805,7 +805,7 @@ define <4 x i64> @vrsub_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_v4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 2, i32 0 @@ -826,7 +826,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vsub.vv v8, v12, v8, v0.t @@ -852,16 +852,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vsub.vv v8, v12, v8 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vrsub_vx_v8i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vrsub.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 @@ -887,7 +887,7 @@ define <8 x i64> @vrsub_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_v8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 2, i32 0 @@ -908,7 +908,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vsub.vv v8, 
v16, v8, v0.t @@ -934,16 +934,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vsub.vv v8, v16, v8 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vrsub_vx_v16i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vrsub.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0 @@ -969,7 +969,7 @@ define <16 x i64> @vrsub_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_v16i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 2, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd.ll @@ -9,7 +9,7 @@ define <2 x i8> @sadd_v2i8_vv(<2 x i8> %va, <2 x i8> %b) { ; CHECK-LABEL: sadd_v2i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %va, <2 x i8> %b) @@ -19,7 +19,7 @@ define <2 x i8> @sadd_v2i8_vx(<2 x i8> %va, i8 %b) { ; CHECK-LABEL: sadd_v2i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0 @@ -31,7 +31,7 @@ define <2 x i8> @sadd_v2i8_vi(<2 x i8> %va) { ; CHECK-LABEL: sadd_v2i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 5, i32 0 @@ -45,7 +45,7 @@ define <4 x i8> @sadd_v4i8_vv(<4 x i8> %va, <4 x i8> %b) { ; CHECK-LABEL: sadd_v4i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> %va, <4 x i8> %b) @@ -55,7 +55,7 @@ define <4 x i8> @sadd_v4i8_vx(<4 x i8> %va, i8 %b) { ; CHECK-LABEL: sadd_v4i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0 @@ -67,7 +67,7 @@ define <4 x i8> @sadd_v4i8_vi(<4 x i8> %va) { ; CHECK-LABEL: sadd_v4i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 5, i32 0 @@ -81,7 +81,7 @@ define <8 x i8> @sadd_v8i8_vv(<8 x i8> %va, <8 x i8> %b) { ; CHECK-LABEL: sadd_v8i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <8 x i8> 
@llvm.sadd.sat.v8i8(<8 x i8> %va, <8 x i8> %b) @@ -91,7 +91,7 @@ define <8 x i8> @sadd_v8i8_vx(<8 x i8> %va, i8 %b) { ; CHECK-LABEL: sadd_v8i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 @@ -103,7 +103,7 @@ define <8 x i8> @sadd_v8i8_vi(<8 x i8> %va) { ; CHECK-LABEL: sadd_v8i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 5, i32 0 @@ -117,7 +117,7 @@ define <16 x i8> @sadd_v16i8_vv(<16 x i8> %va, <16 x i8> %b) { ; CHECK-LABEL: sadd_v16i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %va, <16 x i8> %b) @@ -127,7 +127,7 @@ define <16 x i8> @sadd_v16i8_vx(<16 x i8> %va, i8 %b) { ; CHECK-LABEL: sadd_v16i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0 @@ -139,7 +139,7 @@ define <16 x i8> @sadd_v16i8_vi(<16 x i8> %va) { ; CHECK-LABEL: sadd_v16i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 5, i32 0 @@ -153,7 +153,7 @@ define <2 x i16> @sadd_v2i16_vv(<2 x i16> %va, <2 x i16> %b) { ; CHECK-LABEL: sadd_v2i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> %va, <2 x i16> %b) @@ -163,7 +163,7 @@ define <2 x i16> @sadd_v2i16_vx(<2 x i16> %va, i16 %b) { ; CHECK-LABEL: sadd_v2i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0 @@ -175,7 +175,7 @@ define <2 x i16> @sadd_v2i16_vi(<2 x i16> %va) { ; CHECK-LABEL: sadd_v2i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 5, i32 0 @@ -189,7 +189,7 @@ define <4 x i16> @sadd_v4i16_vv(<4 x i16> %va, <4 x i16> %b) { ; CHECK-LABEL: sadd_v4i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> %va, <4 x i16> %b) @@ -199,7 +199,7 @@ define <4 x i16> @sadd_v4i16_vx(<4 x i16> %va, i16 %b) { ; CHECK-LABEL: sadd_v4i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0 @@ -211,7 +211,7 @@ define <4 x i16> @sadd_v4i16_vi(<4 x i16> %va) { ; CHECK-LABEL: sadd_v4i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, 
ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 5, i32 0 @@ -225,7 +225,7 @@ define <8 x i16> @sadd_v8i16_vv(<8 x i16> %va, <8 x i16> %b) { ; CHECK-LABEL: sadd_v8i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %va, <8 x i16> %b) @@ -235,7 +235,7 @@ define <8 x i16> @sadd_v8i16_vx(<8 x i16> %va, i16 %b) { ; CHECK-LABEL: sadd_v8i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0 @@ -247,7 +247,7 @@ define <8 x i16> @sadd_v8i16_vi(<8 x i16> %va) { ; CHECK-LABEL: sadd_v8i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 5, i32 0 @@ -261,7 +261,7 @@ define <16 x i16> @sadd_v16i16_vv(<16 x i16> %va, <16 x i16> %b) { ; CHECK-LABEL: sadd_v16i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %va, <16 x i16> %b) @@ -271,7 +271,7 @@ define <16 x i16> @sadd_v16i16_vx(<16 x i16> %va, i16 %b) { ; CHECK-LABEL: sadd_v16i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0 @@ -283,7 +283,7 @@ define <16 x i16> @sadd_v16i16_vi(<16 x i16> %va) { ; CHECK-LABEL: sadd_v16i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 5, i32 0 @@ -297,7 +297,7 @@ define <2 x i32> @sadd_v2i32_vv(<2 x i32> %va, <2 x i32> %b) { ; CHECK-LABEL: sadd_v2i32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> %va, <2 x i32> %b) @@ -307,7 +307,7 @@ define <2 x i32> @sadd_v2i32_vx(<2 x i32> %va, i32 %b) { ; CHECK-LABEL: sadd_v2i32_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0 @@ -319,7 +319,7 @@ define <2 x i32> @sadd_v2i32_vx_commute(<2 x i32> %va, i32 %b) { ; CHECK-LABEL: sadd_v2i32_vx_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0 @@ -331,7 +331,7 @@ define <2 x i32> @sadd_v2i32_vi(<2 x i32> %va) { ; CHECK-LABEL: sadd_v2i32_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> 
poison, i32 5, i32 0 @@ -345,7 +345,7 @@ define <4 x i32> @sadd_v4i32_vv(<4 x i32> %va, <4 x i32> %b) { ; CHECK-LABEL: sadd_v4i32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %va, <4 x i32> %b) @@ -355,7 +355,7 @@ define <4 x i32> @sadd_v4i32_vx(<4 x i32> %va, i32 %b) { ; CHECK-LABEL: sadd_v4i32_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0 @@ -367,7 +367,7 @@ define <4 x i32> @sadd_v4i32_vi(<4 x i32> %va) { ; CHECK-LABEL: sadd_v4i32_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 5, i32 0 @@ -381,7 +381,7 @@ define <8 x i32> @sadd_v8i32_vv(<8 x i32> %va, <8 x i32> %b) { ; CHECK-LABEL: sadd_v8i32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> %va, <8 x i32> %b) @@ -391,7 +391,7 @@ define <8 x i32> @sadd_v8i32_vx(<8 x i32> %va, i32 %b) { ; CHECK-LABEL: sadd_v8i32_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 @@ -403,7 +403,7 @@ define <8 x i32> @sadd_v8i32_vi(<8 x i32> %va) { ; CHECK-LABEL: sadd_v8i32_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 5, i32 0 @@ -417,7 +417,7 @@ define <16 x i32> @sadd_v16i32_vv(<16 x i32> %va, <16 x i32> %b) { ; CHECK-LABEL: sadd_v16i32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v12 ; CHECK-NEXT: ret %v = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> %va, <16 x i32> %b) @@ -427,7 +427,7 @@ define <16 x i32> @sadd_v16i32_vx(<16 x i32> %va, i32 %b) { ; CHECK-LABEL: sadd_v16i32_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0 @@ -439,7 +439,7 @@ define <16 x i32> @sadd_v16i32_vi(<16 x i32> %va) { ; CHECK-LABEL: sadd_v16i32_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 5, i32 0 @@ -453,7 +453,7 @@ define <2 x i64> @sadd_v2i64_vv(<2 x i64> %va, <2 x i64> %b) { ; CHECK-LABEL: sadd_v2i64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> %va, <2 x i64> %b) @@ -468,7 +468,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 2, 
e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsadd.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -476,7 +476,7 @@ ; ; RV64-LABEL: sadd_v2i64_vx: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vsadd.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0 @@ -488,7 +488,7 @@ define <2 x i64> @sadd_v2i64_vi(<2 x i64> %va) { ; CHECK-LABEL: sadd_v2i64_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 5, i32 0 @@ -502,7 +502,7 @@ define <4 x i64> @sadd_v4i64_vv(<4 x i64> %va, <4 x i64> %b) { ; CHECK-LABEL: sadd_v4i64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> %va, <4 x i64> %b) @@ -517,7 +517,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsadd.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -525,7 +525,7 @@ ; ; RV64-LABEL: sadd_v4i64_vx: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-NEXT: vsadd.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0 @@ -537,7 +537,7 @@ define <4 x i64> @sadd_v4i64_vi(<4 x i64> %va) { ; CHECK-LABEL: sadd_v4i64_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 5, i32 0 @@ -551,7 +551,7 @@ define <8 x i64> @sadd_v8i64_vv(<8 x i64> %va, <8 x i64> %b) { ; CHECK-LABEL: sadd_v8i64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v12 ; CHECK-NEXT: ret %v = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> %va, <8 x i64> %b) @@ -566,7 +566,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsadd.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -574,7 +574,7 @@ ; ; RV64-LABEL: sadd_v8i64_vx: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsadd.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 @@ -586,7 +586,7 @@ define <8 x i64> @sadd_v8i64_vi(<8 x i64> %va) { ; CHECK-LABEL: sadd_v8i64_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 5, i32 0 @@ -600,7 +600,7 @@ define <16 x i64> @sadd_v16i64_vv(<16 x i64> %va, <16 x i64> %b) { ; CHECK-LABEL: sadd_v16i64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v16 ; CHECK-NEXT: ret %v = call <16 x 
i64> @llvm.sadd.sat.v16i64(<16 x i64> %va, <16 x i64> %b) @@ -615,7 +615,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsadd.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -623,7 +623,7 @@ ; ; RV64-LABEL: sadd_v16i64_vx: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vsadd.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0 @@ -635,7 +635,7 @@ define <16 x i64> @sadd_v16i64_vi(<16 x i64> %va) { ; CHECK-LABEL: sadd_v16i64_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 5, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu.ll @@ -9,7 +9,7 @@ define <2 x i8> @uadd_v2i8_vv(<2 x i8> %va, <2 x i8> %b) { ; CHECK-LABEL: uadd_v2i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %va, <2 x i8> %b) @@ -19,7 +19,7 @@ define <2 x i8> @uadd_v2i8_vx(<2 x i8> %va, i8 %b) { ; CHECK-LABEL: uadd_v2i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0 @@ -31,7 +31,7 @@ define <2 x i8> @uadd_v2i8_vi(<2 x i8> %va) { ; CHECK-LABEL: uadd_v2i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 8 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 8, i32 0 @@ -45,7 +45,7 @@ define <4 x i8> @uadd_v4i8_vv(<4 x i8> %va, <4 x i8> %b) { ; CHECK-LABEL: uadd_v4i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> %va, <4 x i8> %b) @@ -55,7 +55,7 @@ define <4 x i8> @uadd_v4i8_vx(<4 x i8> %va, i8 %b) { ; CHECK-LABEL: uadd_v4i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0 @@ -67,7 +67,7 @@ define <4 x i8> @uadd_v4i8_vi(<4 x i8> %va) { ; CHECK-LABEL: uadd_v4i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 8 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 8, i32 0 @@ -81,7 +81,7 @@ define <8 x i8> @uadd_v8i8_vv(<8 x i8> %va, <8 x i8> %b) { ; CHECK-LABEL: uadd_v8i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> %va, <8 x i8> %b) @@ -91,7 +91,7 @@ define <8 x i8> @uadd_v8i8_vx(<8 x 
i8> %va, i8 %b) { ; CHECK-LABEL: uadd_v8i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 @@ -103,7 +103,7 @@ define <8 x i8> @uadd_v8i8_vi(<8 x i8> %va) { ; CHECK-LABEL: uadd_v8i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 8 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 8, i32 0 @@ -117,7 +117,7 @@ define <16 x i8> @uadd_v16i8_vv(<16 x i8> %va, <16 x i8> %b) { ; CHECK-LABEL: uadd_v16i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %va, <16 x i8> %b) @@ -127,7 +127,7 @@ define <16 x i8> @uadd_v16i8_vx(<16 x i8> %va, i8 %b) { ; CHECK-LABEL: uadd_v16i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0 @@ -139,7 +139,7 @@ define <16 x i8> @uadd_v16i8_vi(<16 x i8> %va) { ; CHECK-LABEL: uadd_v16i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 8 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 8, i32 0 @@ -153,7 +153,7 @@ define <2 x i16> @uadd_v2i16_vv(<2 x i16> %va, <2 x i16> %b) { ; CHECK-LABEL: uadd_v2i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> %va, <2 x i16> %b) @@ -163,7 +163,7 @@ define <2 x i16> @uadd_v2i16_vx(<2 x i16> %va, i16 %b) { ; CHECK-LABEL: uadd_v2i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0 @@ -175,7 +175,7 @@ define <2 x i16> @uadd_v2i16_vi(<2 x i16> %va) { ; CHECK-LABEL: uadd_v2i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 8 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 8, i32 0 @@ -189,7 +189,7 @@ define <4 x i16> @uadd_v4i16_vv(<4 x i16> %va, <4 x i16> %b) { ; CHECK-LABEL: uadd_v4i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> %va, <4 x i16> %b) @@ -199,7 +199,7 @@ define <4 x i16> @uadd_v4i16_vx(<4 x i16> %va, i16 %b) { ; CHECK-LABEL: uadd_v4i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0 @@ -211,7 +211,7 @@ define <4 x i16> @uadd_v4i16_vi(<4 x i16> %va) { ; CHECK-LABEL: uadd_v4i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 8 
; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 8, i32 0 @@ -225,7 +225,7 @@ define <8 x i16> @uadd_v8i16_vv(<8 x i16> %va, <8 x i16> %b) { ; CHECK-LABEL: uadd_v8i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %va, <8 x i16> %b) @@ -235,7 +235,7 @@ define <8 x i16> @uadd_v8i16_vx(<8 x i16> %va, i16 %b) { ; CHECK-LABEL: uadd_v8i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0 @@ -247,7 +247,7 @@ define <8 x i16> @uadd_v8i16_vi(<8 x i16> %va) { ; CHECK-LABEL: uadd_v8i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 8 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 8, i32 0 @@ -261,7 +261,7 @@ define <16 x i16> @uadd_v16i16_vv(<16 x i16> %va, <16 x i16> %b) { ; CHECK-LABEL: uadd_v16i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %va, <16 x i16> %b) @@ -271,7 +271,7 @@ define <16 x i16> @uadd_v16i16_vx(<16 x i16> %va, i16 %b) { ; CHECK-LABEL: uadd_v16i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0 @@ -283,7 +283,7 @@ define <16 x i16> @uadd_v16i16_vi(<16 x i16> %va) { ; CHECK-LABEL: uadd_v16i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 8 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 8, i32 0 @@ -297,7 +297,7 @@ define <2 x i32> @uadd_v2i32_vv(<2 x i32> %va, <2 x i32> %b) { ; CHECK-LABEL: uadd_v2i32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> %va, <2 x i32> %b) @@ -307,7 +307,7 @@ define <2 x i32> @uadd_v2i32_vx(<2 x i32> %va, i32 %b) { ; CHECK-LABEL: uadd_v2i32_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0 @@ -319,7 +319,7 @@ define <2 x i32> @uadd_v2i32_vx_commute(<2 x i32> %va, i32 %b) { ; CHECK-LABEL: uadd_v2i32_vx_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0 @@ -331,7 +331,7 @@ define <2 x i32> @uadd_v2i32_vi(<2 x i32> %va) { ; CHECK-LABEL: uadd_v2i32_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 8 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 8, i32 0 @@ -345,7 +345,7 @@ define <4 x i32> @uadd_v4i32_vv(<4 x i32> 
%va, <4 x i32> %b) { ; CHECK-LABEL: uadd_v4i32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %va, <4 x i32> %b) @@ -355,7 +355,7 @@ define <4 x i32> @uadd_v4i32_vx(<4 x i32> %va, i32 %b) { ; CHECK-LABEL: uadd_v4i32_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0 @@ -367,7 +367,7 @@ define <4 x i32> @uadd_v4i32_vi(<4 x i32> %va) { ; CHECK-LABEL: uadd_v4i32_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 8 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 8, i32 0 @@ -381,7 +381,7 @@ define <8 x i32> @uadd_v8i32_vv(<8 x i32> %va, <8 x i32> %b) { ; CHECK-LABEL: uadd_v8i32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> %va, <8 x i32> %b) @@ -391,7 +391,7 @@ define <8 x i32> @uadd_v8i32_vx(<8 x i32> %va, i32 %b) { ; CHECK-LABEL: uadd_v8i32_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 @@ -403,7 +403,7 @@ define <8 x i32> @uadd_v8i32_vi(<8 x i32> %va) { ; CHECK-LABEL: uadd_v8i32_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 8 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 8, i32 0 @@ -417,7 +417,7 @@ define <16 x i32> @uadd_v16i32_vv(<16 x i32> %va, <16 x i32> %b) { ; CHECK-LABEL: uadd_v16i32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v12 ; CHECK-NEXT: ret %v = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> %va, <16 x i32> %b) @@ -427,7 +427,7 @@ define <16 x i32> @uadd_v16i32_vx(<16 x i32> %va, i32 %b) { ; CHECK-LABEL: uadd_v16i32_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0 @@ -439,7 +439,7 @@ define <16 x i32> @uadd_v16i32_vi(<16 x i32> %va) { ; CHECK-LABEL: uadd_v16i32_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 8 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 8, i32 0 @@ -453,7 +453,7 @@ define <2 x i64> @uadd_v2i64_vv(<2 x i64> %va, <2 x i64> %b) { ; CHECK-LABEL: uadd_v2i64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %va, <2 x i64> %b) @@ -468,7 +468,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; 
RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsaddu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -476,7 +476,7 @@ ; ; RV64-LABEL: uadd_v2i64_vx: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vsaddu.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0 @@ -488,7 +488,7 @@ define <2 x i64> @uadd_v2i64_vi(<2 x i64> %va) { ; CHECK-LABEL: uadd_v2i64_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 8 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 8, i32 0 @@ -502,7 +502,7 @@ define <4 x i64> @uadd_v4i64_vv(<4 x i64> %va, <4 x i64> %b) { ; CHECK-LABEL: uadd_v4i64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> %va, <4 x i64> %b) @@ -517,7 +517,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsaddu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -525,7 +525,7 @@ ; ; RV64-LABEL: uadd_v4i64_vx: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-NEXT: vsaddu.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0 @@ -537,7 +537,7 @@ define <4 x i64> @uadd_v4i64_vi(<4 x i64> %va) { ; CHECK-LABEL: uadd_v4i64_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 8 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 8, i32 0 @@ -551,7 +551,7 @@ define <8 x i64> @uadd_v8i64_vv(<8 x i64> %va, <8 x i64> %b) { ; CHECK-LABEL: uadd_v8i64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v12 ; CHECK-NEXT: ret %v = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> %va, <8 x i64> %b) @@ -566,7 +566,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsaddu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -574,7 +574,7 @@ ; ; RV64-LABEL: uadd_v8i64_vx: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vsaddu.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 @@ -586,7 +586,7 @@ define <8 x i64> @uadd_v8i64_vi(<8 x i64> %va) { ; CHECK-LABEL: uadd_v8i64_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 8 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 8, i32 0 @@ -600,7 +600,7 @@ define <16 x i64> @uadd_v16i64_vv(<16 x i64> %va, <16 x i64> %b) { ; CHECK-LABEL: uadd_v16i64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v16 ; CHECK-NEXT: ret %v = call <16 x i64> @llvm.uadd.sat.v16i64(<16 x i64> %va, <16 x i64> 
%b) @@ -615,7 +615,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsaddu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -623,7 +623,7 @@ ; ; RV64-LABEL: uadd_v16i64_vx: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vsaddu.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0 @@ -635,7 +635,7 @@ define <16 x i64> @uadd_v16i64_vi(<16 x i64> %va) { ; CHECK-LABEL: uadd_v16i64_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 8 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 8, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll @@ -9,7 +9,7 @@ define <1 x i1> @select_v1i1(<1 x i1> %a, <1 x i1> %b, <1 x i1> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmandn.mm v9, v9, v0 ; CHECK-NEXT: vmand.mm v8, v8, v0 ; CHECK-NEXT: vmor.mm v0, v8, v9 @@ -23,7 +23,7 @@ define <2 x i1> @select_v2i1(<2 x i1> %a, <2 x i1> %b, <2 x i1> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmandn.mm v9, v9, v0 ; CHECK-NEXT: vmand.mm v8, v8, v0 ; CHECK-NEXT: vmor.mm v0, v8, v9 @@ -37,7 +37,7 @@ define <4 x i1> @select_v4i1(<4 x i1> %a, <4 x i1> %b, <4 x i1> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmandn.mm v9, v9, v0 ; CHECK-NEXT: vmand.mm v8, v8, v0 ; CHECK-NEXT: vmor.mm v0, v8, v9 @@ -51,7 +51,7 @@ define <8 x i1> @select_v8i1(<8 x i1> %a, <8 x i1> %b, <8 x i1> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmandn.mm v9, v9, v0 ; CHECK-NEXT: vmand.mm v8, v8, v0 ; CHECK-NEXT: vmor.mm v0, v8, v9 @@ -65,7 +65,7 @@ define <16 x i1> @select_v16i1(<16 x i1> %a, <16 x i1> %b, <16 x i1> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmandn.mm v9, v9, v0 ; CHECK-NEXT: vmand.mm v8, v8, v0 ; CHECK-NEXT: vmor.mm v0, v8, v9 @@ -79,7 +79,7 @@ define <8 x i7> @select_v8i7(<8 x i1> %a, <8 x i7> %b, <8 x i7> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v8i7: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = call <8 x i7> @llvm.vp.select.v8i7(<8 x i1> %a, <8 x i7> %b, <8 x i7> %c, i32 %evl) @@ -91,7 +91,7 @@ define <2 x i8> @select_v2i8(<2 x i1> %a, <2 x i8> %b, <2 x i8> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, 
a0, e8, mf8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.vp.select.v2i8(<2 x i1> %a, <2 x i8> %b, <2 x i8> %c, i32 %evl) @@ -103,7 +103,7 @@ define <4 x i8> @select_v4i8(<4 x i1> %a, <4 x i8> %b, <4 x i8> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.vp.select.v4i8(<4 x i1> %a, <4 x i8> %b, <4 x i8> %c, i32 %evl) @@ -115,7 +115,7 @@ define <5 x i8> @select_v5i8(<5 x i1> %a, <5 x i8> %b, <5 x i8> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v5i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = call <5 x i8> @llvm.vp.select.v5i8(<5 x i1> %a, <5 x i8> %b, <5 x i8> %c, i32 %evl) @@ -127,7 +127,7 @@ define <8 x i8> @select_v8i8(<8 x i1> %a, <8 x i8> %b, <8 x i8> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = call <8 x i8> @llvm.vp.select.v8i8(<8 x i1> %a, <8 x i8> %b, <8 x i8> %c, i32 %evl) @@ -139,7 +139,7 @@ define <16 x i8> @select_v16i8(<16 x i1> %a, <16 x i8> %b, <16 x i8> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = call <16 x i8> @llvm.vp.select.v16i8(<16 x i1> %a, <16 x i8> %b, <16 x i8> %c, i32 %evl) @@ -158,7 +158,7 @@ ; CHECK-NEXT: mul a2, a2, a4 ; CHECK-NEXT: sub sp, sp, a2 ; CHECK-NEXT: li a4, 128 -; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v24, (a1) ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: slli a2, a2, 4 @@ -182,7 +182,7 @@ ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill ; CHECK-NEXT: vle8.v v24, (a1) -; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma ; CHECK-NEXT: addi a0, a3, -128 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 4 @@ -199,7 +199,7 @@ ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: mv a4, a0 ; CHECK-NEXT: .LBB11_4: -; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload @@ -224,7 +224,7 @@ ; CHECK-NEXT: mul a2, a2, a3 ; CHECK-NEXT: sub sp, sp, a2 ; CHECK-NEXT: li a2, 128 -; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v24, (a0) ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 @@ -242,7 +242,7 @@ ; CHECK-NEXT: vle8.v v16, (a1) ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill -; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 @@ -250,7 +250,7 @@ ; CHECK-NEXT: addi a0, a0, 16 ; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmerge.vvm v24, v24, v16, v0 -; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma 
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
@@ -276,7 +276,7 @@
define <2 x i16> @select_v2i16(<2 x i1> %a, <2 x i16> %b, <2 x i16> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT: ret
%v = call <2 x i16> @llvm.vp.select.v2i16(<2 x i1> %a, <2 x i16> %b, <2 x i16> %c, i32 %evl)
@@ -288,7 +288,7 @@
define <4 x i16> @select_v4i16(<4 x i1> %a, <4 x i16> %b, <4 x i16> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT: ret
%v = call <4 x i16> @llvm.vp.select.v4i16(<4 x i1> %a, <4 x i16> %b, <4 x i16> %c, i32 %evl)
@@ -300,7 +300,7 @@
define <8 x i16> @select_v8i16(<8 x i1> %a, <8 x i16> %b, <8 x i16> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT: ret
%v = call <8 x i16> @llvm.vp.select.v8i16(<8 x i1> %a, <8 x i16> %b, <8 x i16> %c, i32 %evl)
@@ -312,7 +312,7 @@
define <16 x i16> @select_v16i16(<16 x i1> %a, <16 x i16> %b, <16 x i16> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT: ret
%v = call <16 x i16> @llvm.vp.select.v16i16(<16 x i1> %a, <16 x i16> %b, <16 x i16> %c, i32 %evl)
@@ -324,7 +324,7 @@
define <2 x i32> @select_v2i32(<2 x i1> %a, <2 x i32> %b, <2 x i32> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT: ret
%v = call <2 x i32> @llvm.vp.select.v2i32(<2 x i1> %a, <2 x i32> %b, <2 x i32> %c, i32 %evl)
@@ -336,7 +336,7 @@
define <4 x i32> @select_v4i32(<4 x i1> %a, <4 x i32> %b, <4 x i32> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT: ret
%v = call <4 x i32> @llvm.vp.select.v4i32(<4 x i1> %a, <4 x i32> %b, <4 x i32> %c, i32 %evl)
@@ -348,7 +348,7 @@
define <8 x i32> @select_v8i32(<8 x i1> %a, <8 x i32> %b, <8 x i32> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT: ret
%v = call <8 x i32> @llvm.vp.select.v8i32(<8 x i1> %a, <8 x i32> %b, <8 x i32> %c, i32 %evl)
@@ -360,7 +360,7 @@
define <16 x i32> @select_v16i32(<16 x i1> %a, <16 x i32> %b, <16 x i32> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT: ret
%v = call <16 x i32> @llvm.vp.select.v16i32(<16 x i1> %a, <16 x i32> %b, <16 x i32> %c, i32 %evl)
@@ -372,7 +372,7 @@
define <2 x i64> @select_v2i64(<2 x i1> %a, <2 x i64> %b, <2 x i64> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT: ret
%v = call <2 x i64> @llvm.vp.select.v2i64(<2 x i1> %a, <2 x i64> %b, <2 x i64> %c, i32 %evl)
@@ -384,7 +384,7 @@
define <4 x i64> @select_v4i64(<4 x i1> %a, <4 x i64> %b, <4 x i64> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT: ret
%v = call <4 x i64> @llvm.vp.select.v4i64(<4 x i1> %a, <4 x i64> %b, <4 x i64> %c, i32 %evl)
@@ -396,7 +396,7 @@
define <8 x i64> @select_v8i64(<8 x i1> %a, <8 x i64> %b, <8 x i64> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT: ret
%v = call <8 x i64> @llvm.vp.select.v8i64(<8 x i1> %a, <8 x i64> %b, <8 x i64> %c, i32 %evl)
@@ -408,7 +408,7 @@
define <16 x i64> @select_v16i64(<16 x i1> %a, <16 x i64> %b, <16 x i64> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v16i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT: ret
%v = call <16 x i64> @llvm.vp.select.v16i64(<16 x i1> %a, <16 x i64> %b, <16 x i64> %c, i32 %evl)
@@ -427,7 +427,7 @@
; CHECK-NEXT: mul a1, a1, a3
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: addi a1, a0, 128
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a1)
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
@@ -449,9 +449,9 @@
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v24, 2
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
@@ -463,7 +463,7 @@
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: .LBB25_4:
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
@@ -486,14 +486,14 @@
define <32 x i64> @select_evl_v32i64(<32 x i1> %a, <32 x i64> %b, <32 x i64> %c) {
; CHECK-LABEL: select_evl_v32i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a0)
; CHECK-NEXT: vmerge.vvm v8, v24, v8, v0
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vle64.v v24, (a0)
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v0, 2
-; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, ma
; CHECK-NEXT: vmerge.vvm v16, v24, v16, v0
; CHECK-NEXT: ret
%v = call <32 x i64> @llvm.vp.select.v32i64(<32 x i1> %a, <32 x i64> %b, <32 x i64> %c, i32 17)
@@ -505,7 +505,7 @@
define <2 x half> @select_v2f16(<2 x i1> %a, <2 x half> %b, <2 x half> %c, i32 zeroext %evl) {
; CHECK-LABEL: select_v2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4,
ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = call <2 x half> @llvm.vp.select.v2f16(<2 x i1> %a, <2 x half> %b, <2 x half> %c, i32 %evl) @@ -517,7 +517,7 @@ define <4 x half> @select_v4f16(<4 x i1> %a, <4 x half> %b, <4 x half> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = call <4 x half> @llvm.vp.select.v4f16(<4 x i1> %a, <4 x half> %b, <4 x half> %c, i32 %evl) @@ -529,7 +529,7 @@ define <8 x half> @select_v8f16(<8 x i1> %a, <8 x half> %b, <8 x half> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = call <8 x half> @llvm.vp.select.v8f16(<8 x i1> %a, <8 x half> %b, <8 x half> %c, i32 %evl) @@ -541,7 +541,7 @@ define <16 x half> @select_v16f16(<16 x i1> %a, <16 x half> %b, <16 x half> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %v = call <16 x half> @llvm.vp.select.v16f16(<16 x i1> %a, <16 x half> %b, <16 x half> %c, i32 %evl) @@ -553,7 +553,7 @@ define <2 x float> @select_v2f32(<2 x i1> %a, <2 x float> %b, <2 x float> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = call <2 x float> @llvm.vp.select.v2f32(<2 x i1> %a, <2 x float> %b, <2 x float> %c, i32 %evl) @@ -565,7 +565,7 @@ define <4 x float> @select_v4f32(<4 x i1> %a, <4 x float> %b, <4 x float> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = call <4 x float> @llvm.vp.select.v4f32(<4 x i1> %a, <4 x float> %b, <4 x float> %c, i32 %evl) @@ -577,7 +577,7 @@ define <8 x float> @select_v8f32(<8 x i1> %a, <8 x float> %b, <8 x float> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %v = call <8 x float> @llvm.vp.select.v8f32(<8 x i1> %a, <8 x float> %b, <8 x float> %c, i32 %evl) @@ -589,7 +589,7 @@ define <16 x float> @select_v16f32(<16 x i1> %a, <16 x float> %b, <16 x float> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %v = call <16 x float> @llvm.vp.select.v16f32(<16 x i1> %a, <16 x float> %b, <16 x float> %c, i32 %evl) @@ -607,7 +607,7 @@ ; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: li a3, 32 -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v24, (a0) ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 @@ -624,7 +624,7 @@ ; CHECK-NEXT: .LBB35_2: ; CHECK-NEXT: li a3, 0 ; CHECK-NEXT: vle32.v v16, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; 
CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: addi a0, a2, -32 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 @@ -636,9 +636,9 @@ ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: mv a3, a0 ; CHECK-NEXT: .LBB35_4: -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v0, v0, 4 -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmerge.vvm v16, v16, v24, v0 @@ -656,7 +656,7 @@ define <2 x double> @select_v2f64(<2 x i1> %a, <2 x double> %b, <2 x double> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = call <2 x double> @llvm.vp.select.v2f64(<2 x i1> %a, <2 x double> %b, <2 x double> %c, i32 %evl) @@ -668,7 +668,7 @@ define <4 x double> @select_v4f64(<4 x i1> %a, <4 x double> %b, <4 x double> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %v = call <4 x double> @llvm.vp.select.v4f64(<4 x i1> %a, <4 x double> %b, <4 x double> %c, i32 %evl) @@ -680,7 +680,7 @@ define <8 x double> @select_v8f64(<8 x i1> %a, <8 x double> %b, <8 x double> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %v = call <8 x double> @llvm.vp.select.v8f64(<8 x i1> %a, <8 x double> %b, <8 x double> %c, i32 %evl) @@ -692,7 +692,7 @@ define <16 x double> @select_v16f64(<16 x i1> %a, <16 x double> %b, <16 x double> %c, i32 zeroext %evl) { ; CHECK-LABEL: select_v16f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 ; CHECK-NEXT: ret %v = call <16 x double> @llvm.vp.select.v16f64(<16 x i1> %a, <16 x double> %b, <16 x double> %c, i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll @@ -22,7 +22,7 @@ define void @vselect_vx_v8i32(i32 %a, <8 x i32>* %b, <8 x i1>* %cc, <8 x i32>* %z) { ; CHECK-LABEL: vselect_vx_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vlm.v v0, (a2) ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 @@ -40,7 +40,7 @@ define void @vselect_vi_v8i32(<8 x i32>* %b, <8 x i1>* %cc, <8 x i32>* %z) { ; CHECK-LABEL: vselect_vi_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vlm.v v0, (a1) ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 @@ -75,7 +75,7 @@ define void @vselect_vx_v8f32(float %a, <8 x float>* %b, <8 x i1>* %cc, <8 x float>* %z) { ; CHECK-LABEL: vselect_vx_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vlm.v v0, (a1) ; CHECK-NEXT: 
vle32.v v8, (a0) ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 @@ -93,7 +93,7 @@ define void @vselect_vfpzero_v8f32(<8 x float>* %b, <8 x i1>* %cc, <8 x float>* %z) { ; CHECK-LABEL: vselect_vfpzero_v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vlm.v v0, (a1) ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 @@ -128,7 +128,7 @@ define void @vselect_vx_v16i16(i16 signext %a, <16 x i16>* %b, <16 x i1>* %cc, <16 x i16>* %z) { ; CHECK-LABEL: vselect_vx_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vlm.v v0, (a2) ; CHECK-NEXT: vle16.v v8, (a1) ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 @@ -146,7 +146,7 @@ define void @vselect_vi_v16i16(<16 x i16>* %b, <16 x i1>* %cc, <16 x i16>* %z) { ; CHECK-LABEL: vselect_vi_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vlm.v v0, (a1) ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmerge.vim v8, v8, 4, v0 @@ -183,7 +183,7 @@ ; CHECK-LABEL: vselect_vx_v32f16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a3, 32 -; CHECK-NEXT: vsetvli zero, a3, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e16, m4, ta, ma ; CHECK-NEXT: vlm.v v0, (a1) ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 @@ -202,7 +202,7 @@ ; CHECK-LABEL: vselect_vfpzero_v32f16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a3, 32 -; CHECK-NEXT: vsetvli zero, a3, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e16, m4, ta, ma ; CHECK-NEXT: vlm.v v0, (a1) ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 @@ -220,7 +220,7 @@ define <2 x i1> @vselect_v2i1(<2 x i1> %a, <2 x i1> %b, <2 x i1> %cc) { ; CHECK-LABEL: vselect_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vmandn.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 @@ -232,7 +232,7 @@ define <4 x i1> @vselect_v4i1(<4 x i1> %a, <4 x i1> %b, <4 x i1> %cc) { ; CHECK-LABEL: vselect_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vmandn.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 @@ -244,7 +244,7 @@ define <8 x i1> @vselect_v8i1(<8 x i1> %a, <8 x i1> %b, <8 x i1> %cc) { ; CHECK-LABEL: vselect_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vmandn.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 @@ -256,7 +256,7 @@ define <16 x i1> @vselect_v16i1(<16 x i1> %a, <16 x i1> %b, <16 x i1> %cc) { ; CHECK-LABEL: vselect_v16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmandn.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 @@ -269,7 +269,7 @@ ; CHECK-LABEL: vselect_v32i1: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmandn.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 @@ -282,7 +282,7 @@ ; CHECK-LABEL: vselect_v64i1: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 64 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, 
a0, e8, m4, ta, ma ; CHECK-NEXT: vmandn.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll @@ -10,7 +10,7 @@ ; CHECK-LABEL: vsll_vv_v8i7: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 127 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vand.vx v9, v9, a1 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu ; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t @@ -34,7 +34,7 @@ define <2 x i8> @vsll_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -58,7 +58,7 @@ define <2 x i8> @vsll_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vx_v2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0 @@ -84,7 +84,7 @@ define <2 x i8> @vsll_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_v2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 3, i32 0 @@ -122,7 +122,7 @@ define <4 x i8> @vsll_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -146,7 +146,7 @@ define <4 x i8> @vsll_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vx_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0 @@ -172,7 +172,7 @@ define <4 x i8> @vsll_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 3, i32 0 @@ -198,7 +198,7 @@ define <8 x i8> @vsll_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -222,7 +222,7 @@ define <8 x i8> @vsll_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vx_v8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 @@ -248,7 
+248,7 @@ define <8 x i8> @vsll_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_v8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 3, i32 0 @@ -274,7 +274,7 @@ define <16 x i8> @vsll_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -298,7 +298,7 @@ define <16 x i8> @vsll_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vx_v16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0 @@ -324,7 +324,7 @@ define <16 x i8> @vsll_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_v16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 3, i32 0 @@ -350,7 +350,7 @@ define <2 x i16> @vsll_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -374,7 +374,7 @@ define <2 x i16> @vsll_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vx_v2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0 @@ -400,7 +400,7 @@ define <2 x i16> @vsll_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_v2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 3, i32 0 @@ -426,7 +426,7 @@ define <4 x i16> @vsll_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -450,7 +450,7 @@ define <4 x i16> @vsll_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vx_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0 @@ -476,7 +476,7 @@ define <4 x i16> @vsll_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsll.vi v8, 
v8, 3 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 3, i32 0 @@ -502,7 +502,7 @@ define <8 x i16> @vsll_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -526,7 +526,7 @@ define <8 x i16> @vsll_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vx_v8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0 @@ -552,7 +552,7 @@ define <8 x i16> @vsll_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_v8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 3, i32 0 @@ -578,7 +578,7 @@ define <16 x i16> @vsll_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -602,7 +602,7 @@ define <16 x i16> @vsll_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vx_v16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0 @@ -628,7 +628,7 @@ define <16 x i16> @vsll_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_v16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 3, i32 0 @@ -654,7 +654,7 @@ define <2 x i32> @vsll_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -678,7 +678,7 @@ define <2 x i32> @vsll_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vx_v2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0 @@ -704,7 +704,7 @@ define <2 x i32> @vsll_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_v2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 3, i32 0 @@ -730,7 +730,7 @@ define <4 x i32> @vsll_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v4i32_unmasked: ; CHECK: # 
%bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -754,7 +754,7 @@ define <4 x i32> @vsll_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vx_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0 @@ -780,7 +780,7 @@ define <4 x i32> @vsll_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 3, i32 0 @@ -806,7 +806,7 @@ define <8 x i32> @vsll_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -830,7 +830,7 @@ define <8 x i32> @vsll_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vx_v8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 @@ -856,7 +856,7 @@ define <8 x i32> @vsll_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_v8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 3, i32 0 @@ -882,7 +882,7 @@ define <16 x i32> @vsll_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -906,7 +906,7 @@ define <16 x i32> @vsll_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vx_v16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0 @@ -932,7 +932,7 @@ define <16 x i32> @vsll_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_v16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 3, i32 0 @@ -958,7 +958,7 @@ define <2 x i64> @vsll_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -988,13 +988,13 @@ define <2 x i64> 
@vsll_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl) { ; RV32-LABEL: vsll_vx_v2i64_unmasked: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vsll.vx v8, v8, a0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsll_vx_v2i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vsll.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0 @@ -1020,7 +1020,7 @@ define <2 x i64> @vsll_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_v2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 3, i32 0 @@ -1046,7 +1046,7 @@ define <4 x i64> @vsll_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -1076,13 +1076,13 @@ define <4 x i64> @vsll_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl) { ; RV32-LABEL: vsll_vx_v4i64_unmasked: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vsll.vx v8, v8, a0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsll_vx_v4i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vsll.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0 @@ -1108,7 +1108,7 @@ define <4 x i64> @vsll_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_v4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 3, i32 0 @@ -1134,7 +1134,7 @@ define <8 x i64> @vsll_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -1164,13 +1164,13 @@ define <8 x i64> @vsll_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl) { ; RV32-LABEL: vsll_vx_v8i64_unmasked: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vsll.vx v8, v8, a0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsll_vx_v8i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vsll.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 @@ -1196,7 +1196,7 @@ define <8 x i64> @vsll_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_v8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 3, i32 0 @@ -1222,7 +1222,7 @@ define <16 x i64> 
@vsll_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_v16i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -1252,13 +1252,13 @@ define <16 x i64> @vsll_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %evl) { ; RV32-LABEL: vsll_vx_v16i64_unmasked: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vsll.vx v8, v8, a0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsll_vx_v16i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsll.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0 @@ -1284,7 +1284,7 @@ define <16 x i64> @vsll_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_v16i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 3, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll @@ -10,7 +10,7 @@ ; CHECK-LABEL: vsra_vv_v8i7: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 127 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vand.vx v9, v9, a1 ; CHECK-NEXT: vadd.vv v8, v8, v8 ; CHECK-NEXT: vsra.vi v8, v8, 1 @@ -36,7 +36,7 @@ define <2 x i8> @vsra_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -60,7 +60,7 @@ define <2 x i8> @vsra_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vx_v2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0 @@ -86,7 +86,7 @@ define <2 x i8> @vsra_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_v2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 5, i32 0 @@ -112,7 +112,7 @@ define <4 x i8> @vsra_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -136,7 +136,7 @@ define <4 x i8> @vsra_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vx_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head 
= insertelement <4 x i8> poison, i8 %b, i32 0 @@ -162,7 +162,7 @@ define <4 x i8> @vsra_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 5, i32 0 @@ -200,7 +200,7 @@ define <8 x i8> @vsra_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -224,7 +224,7 @@ define <8 x i8> @vsra_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vx_v8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 @@ -250,7 +250,7 @@ define <8 x i8> @vsra_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_v8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 5, i32 0 @@ -276,7 +276,7 @@ define <16 x i8> @vsra_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -300,7 +300,7 @@ define <16 x i8> @vsra_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vx_v16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0 @@ -326,7 +326,7 @@ define <16 x i8> @vsra_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_v16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 5, i32 0 @@ -352,7 +352,7 @@ define <2 x i16> @vsra_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -376,7 +376,7 @@ define <2 x i16> @vsra_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vx_v2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0 @@ -402,7 +402,7 @@ define <2 x i16> @vsra_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_v2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, 
ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 5, i32 0 @@ -428,7 +428,7 @@ define <4 x i16> @vsra_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -452,7 +452,7 @@ define <4 x i16> @vsra_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vx_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0 @@ -478,7 +478,7 @@ define <4 x i16> @vsra_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 5, i32 0 @@ -504,7 +504,7 @@ define <8 x i16> @vsra_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -528,7 +528,7 @@ define <8 x i16> @vsra_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vx_v8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0 @@ -554,7 +554,7 @@ define <8 x i16> @vsra_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_v8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 5, i32 0 @@ -580,7 +580,7 @@ define <16 x i16> @vsra_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -604,7 +604,7 @@ define <16 x i16> @vsra_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vx_v16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0 @@ -630,7 +630,7 @@ define <16 x i16> @vsra_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_v16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 5, i32 0 @@ -656,7 +656,7 @@ define <2 x i32> @vsra_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: 
vsra_vv_v2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -680,7 +680,7 @@ define <2 x i32> @vsra_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vx_v2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0 @@ -706,7 +706,7 @@ define <2 x i32> @vsra_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_v2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 5, i32 0 @@ -732,7 +732,7 @@ define <4 x i32> @vsra_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -756,7 +756,7 @@ define <4 x i32> @vsra_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vx_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0 @@ -782,7 +782,7 @@ define <4 x i32> @vsra_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 5, i32 0 @@ -808,7 +808,7 @@ define <8 x i32> @vsra_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -832,7 +832,7 @@ define <8 x i32> @vsra_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vx_v8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 @@ -858,7 +858,7 @@ define <8 x i32> @vsra_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_v8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 5, i32 0 @@ -884,7 +884,7 @@ define <16 x i32> @vsra_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ 
-908,7 +908,7 @@ define <16 x i32> @vsra_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vx_v16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0 @@ -934,7 +934,7 @@ define <16 x i32> @vsra_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_v16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 5, i32 0 @@ -960,7 +960,7 @@ define <2 x i64> @vsra_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -990,13 +990,13 @@ define <2 x i64> @vsra_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl) { ; RV32-LABEL: vsra_vx_v2i64_unmasked: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vsra.vx v8, v8, a0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsra_vx_v2i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vsra.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0 @@ -1022,7 +1022,7 @@ define <2 x i64> @vsra_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_v2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 5, i32 0 @@ -1048,7 +1048,7 @@ define <4 x i64> @vsra_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -1078,13 +1078,13 @@ define <4 x i64> @vsra_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl) { ; RV32-LABEL: vsra_vx_v4i64_unmasked: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vsra.vx v8, v8, a0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsra_vx_v4i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vsra.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0 @@ -1110,7 +1110,7 @@ define <4 x i64> @vsra_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_v4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 5, i32 0 @@ -1136,7 +1136,7 @@ define <8 x i64> @vsra_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; 
CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -1166,13 +1166,13 @@ define <8 x i64> @vsra_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl) { ; RV32-LABEL: vsra_vx_v8i64_unmasked: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vsra.vx v8, v8, a0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsra_vx_v8i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vsra.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 @@ -1198,7 +1198,7 @@ define <8 x i64> @vsra_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_v8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 5, i32 0 @@ -1224,7 +1224,7 @@ define <16 x i64> @vsra_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_v16i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -1254,13 +1254,13 @@ define <16 x i64> @vsra_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %evl) { ; RV32-LABEL: vsra_vx_v16i64_unmasked: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vsra.vx v8, v8, a0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsra_vx_v16i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsra.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0 @@ -1286,7 +1286,7 @@ define <16 x i64> @vsra_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_v16i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 5, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll @@ -10,7 +10,7 @@ ; CHECK-LABEL: vsrl_vv_v8i7: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 127 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vand.vx v9, v9, a1 ; CHECK-NEXT: vand.vx v8, v8, a1 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu @@ -35,7 +35,7 @@ define <2 x i8> @vsrl_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -59,7 +59,7 @@ define <2 x i8> @vsrl_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_v2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma 
; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0 @@ -85,7 +85,7 @@ define <2 x i8> @vsrl_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_v2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 4, i32 0 @@ -111,7 +111,7 @@ define <4 x i8> @vsrl_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -135,7 +135,7 @@ define <4 x i8> @vsrl_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0 @@ -161,7 +161,7 @@ define <4 x i8> @vsrl_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 4, i32 0 @@ -199,7 +199,7 @@ define <8 x i8> @vsrl_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -223,7 +223,7 @@ define <8 x i8> @vsrl_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_v8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 @@ -249,7 +249,7 @@ define <8 x i8> @vsrl_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_v8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 4, i32 0 @@ -275,7 +275,7 @@ define <16 x i8> @vsrl_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -299,7 +299,7 @@ define <16 x i8> @vsrl_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_v16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0 @@ -325,7 +325,7 @@ define <16 x i8> @vsrl_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_v16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; 
CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 4, i32 0 @@ -351,7 +351,7 @@ define <2 x i16> @vsrl_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -375,7 +375,7 @@ define <2 x i16> @vsrl_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_v2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0 @@ -401,7 +401,7 @@ define <2 x i16> @vsrl_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_v2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 4, i32 0 @@ -427,7 +427,7 @@ define <4 x i16> @vsrl_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -451,7 +451,7 @@ define <4 x i16> @vsrl_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0 @@ -477,7 +477,7 @@ define <4 x i16> @vsrl_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 4, i32 0 @@ -503,7 +503,7 @@ define <8 x i16> @vsrl_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -527,7 +527,7 @@ define <8 x i16> @vsrl_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_v8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0 @@ -553,7 +553,7 @@ define <8 x i16> @vsrl_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_v8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 4, i32 0 @@ -579,7 +579,7 @@ define <16 x i16> @vsrl_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 
zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -603,7 +603,7 @@ define <16 x i16> @vsrl_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_v16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0 @@ -629,7 +629,7 @@ define <16 x i16> @vsrl_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_v16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 4, i32 0 @@ -655,7 +655,7 @@ define <2 x i32> @vsrl_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -679,7 +679,7 @@ define <2 x i32> @vsrl_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_v2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0 @@ -705,7 +705,7 @@ define <2 x i32> @vsrl_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_v2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 4, i32 0 @@ -731,7 +731,7 @@ define <4 x i32> @vsrl_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -755,7 +755,7 @@ define <4 x i32> @vsrl_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0 @@ -781,7 +781,7 @@ define <4 x i32> @vsrl_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 4, i32 0 @@ -807,7 +807,7 @@ define <8 x i32> @vsrl_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <8 
x i1> poison, i1 true, i32 0 @@ -831,7 +831,7 @@ define <8 x i32> @vsrl_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_v8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 @@ -857,7 +857,7 @@ define <8 x i32> @vsrl_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_v8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 4, i32 0 @@ -883,7 +883,7 @@ define <16 x i32> @vsrl_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -907,7 +907,7 @@ define <16 x i32> @vsrl_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_v16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0 @@ -933,7 +933,7 @@ define <16 x i32> @vsrl_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_v16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 4, i32 0 @@ -959,7 +959,7 @@ define <2 x i64> @vsrl_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -989,13 +989,13 @@ define <2 x i64> @vsrl_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl) { ; RV32-LABEL: vsrl_vx_v2i64_unmasked: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsrl_vx_v2i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vsrl.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0 @@ -1021,7 +1021,7 @@ define <2 x i64> @vsrl_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_v2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 4, i32 0 @@ -1047,7 +1047,7 @@ define <4 x i64> @vsrl_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -1077,13 
+1077,13 @@ define <4 x i64> @vsrl_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl) { ; RV32-LABEL: vsrl_vx_v4i64_unmasked: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsrl_vx_v4i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vsrl.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0 @@ -1109,7 +1109,7 @@ define <4 x i64> @vsrl_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_v4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 4, i32 0 @@ -1135,7 +1135,7 @@ define <8 x i64> @vsrl_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -1165,13 +1165,13 @@ define <8 x i64> @vsrl_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl) { ; RV32-LABEL: vsrl_vx_v8i64_unmasked: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsrl_vx_v8i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vsrl.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 @@ -1197,7 +1197,7 @@ define <8 x i64> @vsrl_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_v8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 4, i32 0 @@ -1223,7 +1223,7 @@ define <16 x i64> @vsrl_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_v16i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -1253,13 +1253,13 @@ define <16 x i64> @vsrl_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %evl) { ; RV32-LABEL: vsrl_vx_v16i64_unmasked: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsrl_vx_v16i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsrl.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0 @@ -1285,7 +1285,7 @@ define <16 x i64> @vsrl_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_v16i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 4, i32 0 diff --git 
a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll @@ -9,7 +9,7 @@ define <2 x i8> @ssub_v2i8_vv(<2 x i8> %va, <2 x i8> %b) { ; CHECK-LABEL: ssub_v2i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %va, <2 x i8> %b) @@ -19,7 +19,7 @@ define <2 x i8> @ssub_v2i8_vx(<2 x i8> %va, i8 %b) { ; CHECK-LABEL: ssub_v2i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0 @@ -32,7 +32,7 @@ ; CHECK-LABEL: ssub_v2i8_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 1, i32 0 @@ -46,7 +46,7 @@ define <4 x i8> @ssub_v4i8_vv(<4 x i8> %va, <4 x i8> %b) { ; CHECK-LABEL: ssub_v4i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> %va, <4 x i8> %b) @@ -56,7 +56,7 @@ define <4 x i8> @ssub_v4i8_vx(<4 x i8> %va, i8 %b) { ; CHECK-LABEL: ssub_v4i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0 @@ -69,7 +69,7 @@ ; CHECK-LABEL: ssub_v4i8_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 1, i32 0 @@ -83,7 +83,7 @@ define <8 x i8> @ssub_v8i8_vv(<8 x i8> %va, <8 x i8> %b) { ; CHECK-LABEL: ssub_v8i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8> %va, <8 x i8> %b) @@ -93,7 +93,7 @@ define <8 x i8> @ssub_v8i8_vx(<8 x i8> %va, i8 %b) { ; CHECK-LABEL: ssub_v8i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 @@ -106,7 +106,7 @@ ; CHECK-LABEL: ssub_v8i8_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 1, i32 0 @@ -120,7 +120,7 @@ define <16 x i8> @ssub_v16i8_vv(<16 x i8> %va, <16 x i8> %b) { ; CHECK-LABEL: ssub_v16i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %va, <16 x i8> %b) @@ -130,7 +130,7 @@ define <16 x i8> @ssub_v16i8_vx(<16 x i8> %va, i8 %b) { ; CHECK-LABEL: ssub_v16i8_vx: ; 
CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0 @@ -143,7 +143,7 @@ ; CHECK-LABEL: ssub_v16i8_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 1, i32 0 @@ -157,7 +157,7 @@ define <2 x i16> @ssub_v2i16_vv(<2 x i16> %va, <2 x i16> %b) { ; CHECK-LABEL: ssub_v2i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> %va, <2 x i16> %b) @@ -167,7 +167,7 @@ define <2 x i16> @ssub_v2i16_vx(<2 x i16> %va, i16 %b) { ; CHECK-LABEL: ssub_v2i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0 @@ -180,7 +180,7 @@ ; CHECK-LABEL: ssub_v2i16_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 1, i32 0 @@ -194,7 +194,7 @@ define <4 x i16> @ssub_v4i16_vv(<4 x i16> %va, <4 x i16> %b) { ; CHECK-LABEL: ssub_v4i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> %va, <4 x i16> %b) @@ -204,7 +204,7 @@ define <4 x i16> @ssub_v4i16_vx(<4 x i16> %va, i16 %b) { ; CHECK-LABEL: ssub_v4i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0 @@ -217,7 +217,7 @@ ; CHECK-LABEL: ssub_v4i16_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 1, i32 0 @@ -231,7 +231,7 @@ define <8 x i16> @ssub_v8i16_vv(<8 x i16> %va, <8 x i16> %b) { ; CHECK-LABEL: ssub_v8i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %va, <8 x i16> %b) @@ -241,7 +241,7 @@ define <8 x i16> @ssub_v8i16_vx(<8 x i16> %va, i16 %b) { ; CHECK-LABEL: ssub_v8i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0 @@ -254,7 +254,7 @@ ; CHECK-LABEL: ssub_v8i16_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 1, i32 0 @@ -268,7 +268,7 @@ define <16 x i16> @ssub_v16i16_vv(<16 x i16> %va, <16 x 
i16> %b) { ; CHECK-LABEL: ssub_v16i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %va, <16 x i16> %b) @@ -278,7 +278,7 @@ define <16 x i16> @ssub_v16i16_vx(<16 x i16> %va, i16 %b) { ; CHECK-LABEL: ssub_v16i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0 @@ -291,7 +291,7 @@ ; CHECK-LABEL: ssub_v16i16_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 1, i32 0 @@ -305,7 +305,7 @@ define <2 x i32> @ssub_v2i32_vv(<2 x i32> %va, <2 x i32> %b) { ; CHECK-LABEL: ssub_v2i32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> %va, <2 x i32> %b) @@ -315,7 +315,7 @@ define <2 x i32> @ssub_v2i32_vx(<2 x i32> %va, i32 %b) { ; CHECK-LABEL: ssub_v2i32_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0 @@ -328,7 +328,7 @@ ; CHECK-LABEL: ssub_v2i32_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 1, i32 0 @@ -342,7 +342,7 @@ define <4 x i32> @ssub_v4i32_vv(<4 x i32> %va, <4 x i32> %b) { ; CHECK-LABEL: ssub_v4i32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %va, <4 x i32> %b) @@ -352,7 +352,7 @@ define <4 x i32> @ssub_v4i32_vx(<4 x i32> %va, i32 %b) { ; CHECK-LABEL: ssub_v4i32_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0 @@ -365,7 +365,7 @@ ; CHECK-LABEL: ssub_v4i32_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 1, i32 0 @@ -379,7 +379,7 @@ define <8 x i32> @ssub_v8i32_vv(<8 x i32> %va, <8 x i32> %b) { ; CHECK-LABEL: ssub_v8i32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> %va, <8 x i32> %b) @@ -389,7 +389,7 @@ define <8 x i32> @ssub_v8i32_vx(<8 x i32> %va, i32 %b) { ; CHECK-LABEL: ssub_v8i32_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 
x i32> poison, i32 %b, i32 0 @@ -402,7 +402,7 @@ ; CHECK-LABEL: ssub_v8i32_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 1, i32 0 @@ -416,7 +416,7 @@ define <16 x i32> @ssub_v16i32_vv(<16 x i32> %va, <16 x i32> %b) { ; CHECK-LABEL: ssub_v16i32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v12 ; CHECK-NEXT: ret %v = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> %va, <16 x i32> %b) @@ -426,7 +426,7 @@ define <16 x i32> @ssub_v16i32_vx(<16 x i32> %va, i32 %b) { ; CHECK-LABEL: ssub_v16i32_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0 @@ -439,7 +439,7 @@ ; CHECK-LABEL: ssub_v16i32_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 1, i32 0 @@ -453,7 +453,7 @@ define <2 x i64> @ssub_v2i64_vv(<2 x i64> %va, <2 x i64> %b) { ; CHECK-LABEL: ssub_v2i64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> %va, <2 x i64> %b) @@ -468,7 +468,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vssub.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -476,7 +476,7 @@ ; ; RV64-LABEL: ssub_v2i64_vx: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vssub.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0 @@ -489,7 +489,7 @@ ; CHECK-LABEL: ssub_v2i64_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 1, i32 0 @@ -503,7 +503,7 @@ define <4 x i64> @ssub_v4i64_vv(<4 x i64> %va, <4 x i64> %b) { ; CHECK-LABEL: ssub_v4i64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> %va, <4 x i64> %b) @@ -518,7 +518,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vssub.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -526,7 +526,7 @@ ; ; RV64-LABEL: ssub_v4i64_vx: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-NEXT: vssub.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0 @@ -539,7 +539,7 @@ ; CHECK-LABEL: ssub_v4i64_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: 
li a0, 1 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 1, i32 0 @@ -553,7 +553,7 @@ define <8 x i64> @ssub_v8i64_vv(<8 x i64> %va, <8 x i64> %b) { ; CHECK-LABEL: ssub_v8i64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v12 ; CHECK-NEXT: ret %v = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> %va, <8 x i64> %b) @@ -568,7 +568,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vssub.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -576,7 +576,7 @@ ; ; RV64-LABEL: ssub_v8i64_vx: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vssub.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 @@ -589,7 +589,7 @@ ; CHECK-LABEL: ssub_v8i64_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 1, i32 0 @@ -603,7 +603,7 @@ define <16 x i64> @ssub_v16i64_vv(<16 x i64> %va, <16 x i64> %b) { ; CHECK-LABEL: ssub_v16i64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v16 ; CHECK-NEXT: ret %v = call <16 x i64> @llvm.ssub.sat.v16i64(<16 x i64> %va, <16 x i64> %b) @@ -618,7 +618,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vssub.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -626,7 +626,7 @@ ; ; RV64-LABEL: ssub_v16i64_vx: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vssub.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0 @@ -639,7 +639,7 @@ ; CHECK-LABEL: ssub_v16i64_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 1, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll @@ -9,7 +9,7 @@ define <2 x i8> @usub_v2i8_vv(<2 x i8> %va, <2 x i8> %b) { ; CHECK-LABEL: usub_v2i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %va, <2 x i8> %b) @@ -19,7 +19,7 @@ define <2 x i8> @usub_v2i8_vx(<2 x i8> %va, i8 %b) { ; CHECK-LABEL: usub_v2i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> 
poison, i8 %b, i32 0 @@ -32,7 +32,7 @@ ; CHECK-LABEL: usub_v2i8_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 2, i32 0 @@ -46,7 +46,7 @@ define <4 x i8> @usub_v4i8_vv(<4 x i8> %va, <4 x i8> %b) { ; CHECK-LABEL: usub_v4i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> %va, <4 x i8> %b) @@ -56,7 +56,7 @@ define <4 x i8> @usub_v4i8_vx(<4 x i8> %va, i8 %b) { ; CHECK-LABEL: usub_v4i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0 @@ -69,7 +69,7 @@ ; CHECK-LABEL: usub_v4i8_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 2, i32 0 @@ -83,7 +83,7 @@ define <8 x i8> @usub_v8i8_vv(<8 x i8> %va, <8 x i8> %b) { ; CHECK-LABEL: usub_v8i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> %va, <8 x i8> %b) @@ -93,7 +93,7 @@ define <8 x i8> @usub_v8i8_vx(<8 x i8> %va, i8 %b) { ; CHECK-LABEL: usub_v8i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 @@ -106,7 +106,7 @@ ; CHECK-LABEL: usub_v8i8_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 2, i32 0 @@ -120,7 +120,7 @@ define <16 x i8> @usub_v16i8_vv(<16 x i8> %va, <16 x i8> %b) { ; CHECK-LABEL: usub_v16i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %va, <16 x i8> %b) @@ -130,7 +130,7 @@ define <16 x i8> @usub_v16i8_vx(<16 x i8> %va, i8 %b) { ; CHECK-LABEL: usub_v16i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0 @@ -143,7 +143,7 @@ ; CHECK-LABEL: usub_v16i8_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 2, i32 0 @@ -157,7 +157,7 @@ define <2 x i16> @usub_v2i16_vv(<2 x i16> %va, <2 x i16> %b) { ; CHECK-LABEL: usub_v2i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> 
%va, <2 x i16> %b) @@ -167,7 +167,7 @@ define <2 x i16> @usub_v2i16_vx(<2 x i16> %va, i16 %b) { ; CHECK-LABEL: usub_v2i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0 @@ -180,7 +180,7 @@ ; CHECK-LABEL: usub_v2i16_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 2, i32 0 @@ -194,7 +194,7 @@ define <4 x i16> @usub_v4i16_vv(<4 x i16> %va, <4 x i16> %b) { ; CHECK-LABEL: usub_v4i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> %va, <4 x i16> %b) @@ -204,7 +204,7 @@ define <4 x i16> @usub_v4i16_vx(<4 x i16> %va, i16 %b) { ; CHECK-LABEL: usub_v4i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0 @@ -217,7 +217,7 @@ ; CHECK-LABEL: usub_v4i16_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 2, i32 0 @@ -231,7 +231,7 @@ define <8 x i16> @usub_v8i16_vv(<8 x i16> %va, <8 x i16> %b) { ; CHECK-LABEL: usub_v8i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %va, <8 x i16> %b) @@ -241,7 +241,7 @@ define <8 x i16> @usub_v8i16_vx(<8 x i16> %va, i16 %b) { ; CHECK-LABEL: usub_v8i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0 @@ -254,7 +254,7 @@ ; CHECK-LABEL: usub_v8i16_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 2, i32 0 @@ -268,7 +268,7 @@ define <16 x i16> @usub_v16i16_vv(<16 x i16> %va, <16 x i16> %b) { ; CHECK-LABEL: usub_v16i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %va, <16 x i16> %b) @@ -278,7 +278,7 @@ define <16 x i16> @usub_v16i16_vx(<16 x i16> %va, i16 %b) { ; CHECK-LABEL: usub_v16i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0 @@ -291,7 +291,7 @@ ; CHECK-LABEL: usub_v16i16_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vssubu.vx 
v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 2, i32 0 @@ -305,7 +305,7 @@ define <2 x i32> @usub_v2i32_vv(<2 x i32> %va, <2 x i32> %b) { ; CHECK-LABEL: usub_v2i32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> %va, <2 x i32> %b) @@ -315,7 +315,7 @@ define <2 x i32> @usub_v2i32_vx(<2 x i32> %va, i32 %b) { ; CHECK-LABEL: usub_v2i32_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0 @@ -328,7 +328,7 @@ ; CHECK-LABEL: usub_v2i32_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 2, i32 0 @@ -342,7 +342,7 @@ define <4 x i32> @usub_v4i32_vv(<4 x i32> %va, <4 x i32> %b) { ; CHECK-LABEL: usub_v4i32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %va, <4 x i32> %b) @@ -352,7 +352,7 @@ define <4 x i32> @usub_v4i32_vx(<4 x i32> %va, i32 %b) { ; CHECK-LABEL: usub_v4i32_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0 @@ -365,7 +365,7 @@ ; CHECK-LABEL: usub_v4i32_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 2, i32 0 @@ -379,7 +379,7 @@ define <8 x i32> @usub_v8i32_vv(<8 x i32> %va, <8 x i32> %b) { ; CHECK-LABEL: usub_v8i32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> %va, <8 x i32> %b) @@ -389,7 +389,7 @@ define <8 x i32> @usub_v8i32_vx(<8 x i32> %va, i32 %b) { ; CHECK-LABEL: usub_v8i32_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 @@ -402,7 +402,7 @@ ; CHECK-LABEL: usub_v8i32_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 2, i32 0 @@ -416,7 +416,7 @@ define <16 x i32> @usub_v16i32_vv(<16 x i32> %va, <16 x i32> %b) { ; CHECK-LABEL: usub_v16i32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v12 ; CHECK-NEXT: ret %v = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> %va, <16 x i32> %b) @@ -426,7 +426,7 @@ define <16 x i32> @usub_v16i32_vx(<16 x i32> %va, i32 %b) { ; CHECK-LABEL: usub_v16i32_vx: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0 @@ -439,7 +439,7 @@ ; CHECK-LABEL: usub_v16i32_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 2, i32 0 @@ -453,7 +453,7 @@ define <2 x i64> @usub_v2i64_vv(<2 x i64> %va, <2 x i64> %b) { ; CHECK-LABEL: usub_v2i64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> %va, <2 x i64> %b) @@ -468,7 +468,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vssubu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -476,7 +476,7 @@ ; ; RV64-LABEL: usub_v2i64_vx: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV64-NEXT: vssubu.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0 @@ -489,7 +489,7 @@ ; CHECK-LABEL: usub_v2i64_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 2, i32 0 @@ -503,7 +503,7 @@ define <4 x i64> @usub_v4i64_vv(<4 x i64> %va, <4 x i64> %b) { ; CHECK-LABEL: usub_v4i64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> %va, <4 x i64> %b) @@ -518,7 +518,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vssubu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -526,7 +526,7 @@ ; ; RV64-LABEL: usub_v4i64_vx: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-NEXT: vssubu.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0 @@ -539,7 +539,7 @@ ; CHECK-LABEL: usub_v4i64_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 2, i32 0 @@ -553,7 +553,7 @@ define <8 x i64> @usub_v8i64_vv(<8 x i64> %va, <8 x i64> %b) { ; CHECK-LABEL: usub_v8i64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v12 ; CHECK-NEXT: ret %v = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> %va, <8 x i64> %b) @@ -568,7 +568,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; 
RV32-NEXT: vssubu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -576,7 +576,7 @@ ; ; RV64-LABEL: usub_v8i64_vx: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV64-NEXT: vssubu.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 @@ -589,7 +589,7 @@ ; CHECK-LABEL: usub_v8i64_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 2, i32 0 @@ -603,7 +603,7 @@ define <16 x i64> @usub_v16i64_vv(<16 x i64> %va, <16 x i64> %b) { ; CHECK-LABEL: usub_v16i64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v16 ; CHECK-NEXT: ret %v = call <16 x i64> @llvm.usub.sat.v16i64(<16 x i64> %va, <16 x i64> %b) @@ -618,7 +618,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vssubu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -626,7 +626,7 @@ ; ; RV64-LABEL: usub_v16i64_vx: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV64-NEXT: vssubu.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0 @@ -639,7 +639,7 @@ ; CHECK-LABEL: usub_v16i64_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 2, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp-mask.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp-mask.ll @@ -9,7 +9,7 @@ define <2 x i1> @vsub_vv_v2i1(<2 x i1> %va, <2 x i1> %b, <2 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <2 x i1> @llvm.vp.sub.v2i1(<2 x i1> %va, <2 x i1> %b, <2 x i1> %m, i32 %evl) @@ -21,7 +21,7 @@ define <4 x i1> @vsub_vv_v4i1(<4 x i1> %va, <4 x i1> %b, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <4 x i1> @llvm.vp.sub.v4i1(<4 x i1> %va, <4 x i1> %b, <4 x i1> %m, i32 %evl) @@ -33,7 +33,7 @@ define <8 x i1> @vsub_vv_v8i1(<8 x i1> %va, <8 x i1> %b, <8 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <8 x i1> @llvm.vp.sub.v8i1(<8 x i1> %va, <8 x i1> %b, <8 x i1> %m, i32 %evl) @@ -45,7 +45,7 @@ define <16 x i1> @vsub_vv_v16i1(<16 x i1> %va, <16 x i1> %b, <16 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, 
e8, m1, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <16 x i1> @llvm.vp.sub.v16i1(<16 x i1> %va, <16 x i1> %b, <16 x i1> %m, i32 %evl) @@ -57,7 +57,7 @@ define <32 x i1> @vsub_vv_v32i1(<32 x i1> %va, <32 x i1> %b, <32 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <32 x i1> @llvm.vp.sub.v32i1(<32 x i1> %va, <32 x i1> %b, <32 x i1> %m, i32 %evl) @@ -69,7 +69,7 @@ define <64 x i1> @vsub_vv_v64i1(<64 x i1> %va, <64 x i1> %b, <64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <64 x i1> @llvm.vp.sub.v64i1(<64 x i1> %va, <64 x i1> %b, <64 x i1> %m, i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll @@ -31,7 +31,7 @@ define <2 x i8> @vsub_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -55,7 +55,7 @@ define <2 x i8> @vsub_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_v2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0 @@ -81,7 +81,7 @@ define <3 x i8> @vsub_vv_v3i8_unmasked(<3 x i8> %va, <3 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v3i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <3 x i1> poison, i1 true, i32 0 @@ -105,7 +105,7 @@ define <3 x i8> @vsub_vx_v3i8_unmasked(<3 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_v3i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <3 x i8> poison, i8 %b, i32 0 @@ -131,7 +131,7 @@ define <4 x i8> @vsub_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -155,7 +155,7 @@ define <4 x i8> @vsub_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0 @@ -181,7 +181,7 @@ define <8 x i8> @vsub_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: 
vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -205,7 +205,7 @@ define <8 x i8> @vsub_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_v8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 @@ -231,7 +231,7 @@ define <16 x i8> @vsub_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -255,7 +255,7 @@ define <16 x i8> @vsub_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_v16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0 @@ -281,7 +281,7 @@ define <2 x i16> @vsub_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -305,7 +305,7 @@ define <2 x i16> @vsub_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_v2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0 @@ -331,7 +331,7 @@ define <4 x i16> @vsub_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -355,7 +355,7 @@ define <4 x i16> @vsub_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0 @@ -381,7 +381,7 @@ define <8 x i16> @vsub_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -405,7 +405,7 @@ define <8 x i16> @vsub_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_v8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0 @@ -431,7 +431,7 @@ define <16 x i16> 
@vsub_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -455,7 +455,7 @@ define <16 x i16> @vsub_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_v16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0 @@ -481,7 +481,7 @@ define <2 x i32> @vsub_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -505,7 +505,7 @@ define <2 x i32> @vsub_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_v2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0 @@ -531,7 +531,7 @@ define <4 x i32> @vsub_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -555,7 +555,7 @@ define <4 x i32> @vsub_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0 @@ -581,7 +581,7 @@ define <8 x i32> @vsub_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -605,7 +605,7 @@ define <8 x i32> @vsub_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_v8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 @@ -631,7 +631,7 @@ define <16 x i32> @vsub_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -655,7 +655,7 @@ define <16 x i32> @vsub_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_v16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, 
e32, m4, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0 @@ -681,7 +681,7 @@ define <2 x i64> @vsub_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -698,7 +698,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v9, v0.t @@ -724,16 +724,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vsub.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_v2i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vsub.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0 @@ -759,7 +759,7 @@ define <4 x i64> @vsub_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -776,7 +776,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v10, v0.t @@ -802,16 +802,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vsub.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_v4i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vsub.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0 @@ -837,7 +837,7 @@ define <8 x i64> @vsub_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -854,7 +854,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v12, v0.t @@ -880,16 
+880,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vsub.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_v8i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vsub.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 @@ -915,7 +915,7 @@ define <16 x i64> @vsub_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_v16i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -932,7 +932,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v16, v0.t @@ -958,16 +958,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vsub.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_v16i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsub.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vw-web-simplification.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vw-web-simplification.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vw-web-simplification.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vw-web-simplification.ll @@ -18,7 +18,7 @@ define <2 x i16> @vwmul_v2i16_multiple_users(<2 x i8>* %x, <2 x i8>* %y, <2 x i8> *%z) { ; NO_FOLDING-LABEL: vwmul_v2i16_multiple_users: ; NO_FOLDING: # %bb.0: -; NO_FOLDING-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; NO_FOLDING-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; NO_FOLDING-NEXT: vle8.v v8, (a0) ; NO_FOLDING-NEXT: vle8.v v9, (a1) ; NO_FOLDING-NEXT: vle8.v v10, (a2) @@ -34,14 +34,14 @@ ; ; FOLDING-LABEL: vwmul_v2i16_multiple_users: ; FOLDING: # %bb.0: -; FOLDING-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; FOLDING-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; FOLDING-NEXT: vle8.v v8, (a0) ; FOLDING-NEXT: vle8.v v9, (a1) ; FOLDING-NEXT: vle8.v v10, (a2) ; FOLDING-NEXT: vwmul.vv v11, v8, v9 ; FOLDING-NEXT: vwadd.vv v9, v8, v10 ; FOLDING-NEXT: vwsub.vv v12, v8, v10 -; FOLDING-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; FOLDING-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; FOLDING-NEXT: vor.vv v8, v11, v9 ; FOLDING-NEXT: vor.vv v8, v8, v12 ; FOLDING-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll 
@@ -5,7 +5,7 @@ define <2 x i16> @vwadd_v2i16(<2 x i8>* %x, <2 x i8>* %y) { ; CHECK-LABEL: vwadd_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vle8.v v10, (a1) ; CHECK-NEXT: vwadd.vv v8, v9, v10 @@ -21,7 +21,7 @@ define <4 x i16> @vwadd_v4i16(<4 x i8>* %x, <4 x i8>* %y) { ; CHECK-LABEL: vwadd_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vle8.v v10, (a1) ; CHECK-NEXT: vwadd.vv v8, v9, v10 @@ -37,7 +37,7 @@ define <2 x i32> @vwadd_v2i32(<2 x i16>* %x, <2 x i16>* %y) { ; CHECK-LABEL: vwadd_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vle16.v v10, (a1) ; CHECK-NEXT: vwadd.vv v8, v9, v10 @@ -53,7 +53,7 @@ define <8 x i16> @vwadd_v8i16(<8 x i8>* %x, <8 x i8>* %y) { ; CHECK-LABEL: vwadd_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vle8.v v10, (a1) ; CHECK-NEXT: vwadd.vv v8, v9, v10 @@ -69,7 +69,7 @@ define <4 x i32> @vwadd_v4i32(<4 x i16>* %x, <4 x i16>* %y) { ; CHECK-LABEL: vwadd_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vle16.v v10, (a1) ; CHECK-NEXT: vwadd.vv v8, v9, v10 @@ -85,7 +85,7 @@ define <2 x i64> @vwadd_v2i64(<2 x i32>* %x, <2 x i32>* %y) { ; CHECK-LABEL: vwadd_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vle32.v v10, (a1) ; CHECK-NEXT: vwadd.vv v8, v9, v10 @@ -101,7 +101,7 @@ define <16 x i16> @vwadd_v16i16(<16 x i8>* %x, <16 x i8>* %y) { ; CHECK-LABEL: vwadd_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v10, (a0) ; CHECK-NEXT: vle8.v v11, (a1) ; CHECK-NEXT: vwadd.vv v8, v10, v11 @@ -117,7 +117,7 @@ define <8 x i32> @vwadd_v8i32(<8 x i16>* %x, <8 x i16>* %y) { ; CHECK-LABEL: vwadd_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v10, (a0) ; CHECK-NEXT: vle16.v v11, (a1) ; CHECK-NEXT: vwadd.vv v8, v10, v11 @@ -133,7 +133,7 @@ define <4 x i64> @vwadd_v4i64(<4 x i32>* %x, <4 x i32>* %y) { ; CHECK-LABEL: vwadd_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v10, (a0) ; CHECK-NEXT: vle32.v v11, (a1) ; CHECK-NEXT: vwadd.vv v8, v10, v11 @@ -150,7 +150,7 @@ ; CHECK-LABEL: vwadd_v32i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vle8.v v12, (a0) ; CHECK-NEXT: vle8.v v14, (a1) ; CHECK-NEXT: vwadd.vv v8, v12, v14 @@ -166,7 +166,7 @@ define <16 x i32> @vwadd_v16i32(<16 x i16>* %x, <16 x i16>* %y) { ; CHECK-LABEL: vwadd_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v12, (a0) ; CHECK-NEXT: vle16.v v14, (a1) ; CHECK-NEXT: vwadd.vv v8, v12, v14 @@ -182,7 +182,7 @@ define <8 x 
i64> @vwadd_v8i64(<8 x i32>* %x, <8 x i32>* %y) { ; CHECK-LABEL: vwadd_v8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v12, (a0) ; CHECK-NEXT: vle32.v v14, (a1) ; CHECK-NEXT: vwadd.vv v8, v12, v14 @@ -199,7 +199,7 @@ ; CHECK-LABEL: vwadd_v64i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 64 -; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vle8.v v20, (a1) ; CHECK-NEXT: vwadd.vv v8, v16, v20 @@ -216,7 +216,7 @@ ; CHECK-LABEL: vwadd_v32i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vle16.v v20, (a1) ; CHECK-NEXT: vwadd.vv v8, v16, v20 @@ -232,7 +232,7 @@ define <16 x i64> @vwadd_v16i64(<16 x i32>* %x, <16 x i32>* %y) { ; CHECK-LABEL: vwadd_v16i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vle32.v v20, (a1) ; CHECK-NEXT: vwadd.vv v8, v16, v20 @@ -253,16 +253,16 @@ ; CHECK-NEXT: slli a2, a2, 3 ; CHECK-NEXT: sub sp, sp, a2 ; CHECK-NEXT: li a2, 128 -; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vle8.v v24, (a1) ; CHECK-NEXT: li a0, 64 -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v16, a0 ; CHECK-NEXT: addi a1, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: vslidedown.vx v0, v24, a0 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwadd.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload @@ -288,16 +288,16 @@ ; CHECK-NEXT: slli a2, a2, 3 ; CHECK-NEXT: sub sp, sp, a2 ; CHECK-NEXT: li a2, 64 -; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vle16.v v24, (a1) ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v16, a0 ; CHECK-NEXT: addi a1, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: vslidedown.vx v0, v24, a0 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwadd.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload @@ -323,15 +323,15 @@ ; CHECK-NEXT: slli a2, a2, 3 ; CHECK-NEXT: sub sp, sp, a2 ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vle32.v v24, (a1) -; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v16, 16 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill ; CHECK-NEXT: vslidedown.vi v0, v24, 16 -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vwadd.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload @@ -352,7 +352,7 @@ define <2 x 
i32> @vwadd_v2i32_v2i8(<2 x i8>* %x, <2 x i8>* %y) { ; CHECK-LABEL: vwadd_v2i32_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle8.v v8, (a1) ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vsext.vf2 v10, v8 @@ -370,7 +370,7 @@ define <4 x i32> @vwadd_v4i32_v4i8_v4i16(<4 x i8>* %x, <4 x i16>* %y) { ; CHECK-LABEL: vwadd_v4i32_v4i8_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vsext.vf2 v10, v8 @@ -387,7 +387,7 @@ define <4 x i64> @vwadd_v4i64_v4i32_v4i8(<4 x i32>* %x, <4 x i8>* %y) { ; CHECK-LABEL: vwadd_v4i64_v4i32_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a1) ; CHECK-NEXT: vle32.v v10, (a0) ; CHECK-NEXT: vsext.vf4 v11, v8 @@ -404,7 +404,7 @@ define <2 x i16> @vwadd_vx_v2i16(<2 x i8>* %x, i8 %y) { ; CHECK-LABEL: vwadd_vx_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vwadd.vx v8, v9, a1 ; CHECK-NEXT: ret @@ -420,7 +420,7 @@ define <4 x i16> @vwadd_vx_v4i16(<4 x i8>* %x, i8 %y) { ; CHECK-LABEL: vwadd_vx_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vwadd.vx v8, v9, a1 ; CHECK-NEXT: ret @@ -436,7 +436,7 @@ define <2 x i32> @vwadd_vx_v2i32(<2 x i16>* %x, i16 %y) { ; CHECK-LABEL: vwadd_vx_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vwadd.vx v8, v9, a1 ; CHECK-NEXT: ret @@ -452,7 +452,7 @@ define <8 x i16> @vwadd_vx_v8i16(<8 x i8>* %x, i8 %y) { ; CHECK-LABEL: vwadd_vx_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vwadd.vx v8, v9, a1 ; CHECK-NEXT: ret @@ -468,7 +468,7 @@ define <4 x i32> @vwadd_vx_v4i32(<4 x i16>* %x, i16 %y) { ; CHECK-LABEL: vwadd_vx_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vwadd.vx v8, v9, a1 ; CHECK-NEXT: ret @@ -484,7 +484,7 @@ define <2 x i64> @vwadd_vx_v2i64(<2 x i32>* %x, i32 %y) { ; CHECK-LABEL: vwadd_vx_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vwadd.vx v8, v9, a1 ; CHECK-NEXT: ret @@ -500,7 +500,7 @@ define <16 x i16> @vwadd_vx_v16i16(<16 x i8>* %x, i8 %y) { ; CHECK-LABEL: vwadd_vx_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v10, (a0) ; CHECK-NEXT: vwadd.vx v8, v10, a1 ; CHECK-NEXT: ret @@ -516,7 +516,7 @@ define <8 x i32> @vwadd_vx_v8i32(<8 x i16>* %x, i16 %y) { ; CHECK-LABEL: vwadd_vx_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v10, (a0) ; CHECK-NEXT: vwadd.vx v8, v10, a1 ; CHECK-NEXT: ret @@ -532,7 +532,7 @@ define <4 x i64> @vwadd_vx_v4i64(<4 x i32>* %x, i32 %y) { ; CHECK-LABEL: vwadd_vx_v4i64: 
; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v10, (a0) ; CHECK-NEXT: vwadd.vx v8, v10, a1 ; CHECK-NEXT: ret @@ -549,7 +549,7 @@ ; CHECK-LABEL: vwadd_vx_v32i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vle8.v v12, (a0) ; CHECK-NEXT: vwadd.vx v8, v12, a1 ; CHECK-NEXT: ret @@ -565,7 +565,7 @@ define <16 x i32> @vwadd_vx_v16i32(<16 x i16>* %x, i16 %y) { ; CHECK-LABEL: vwadd_vx_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v12, (a0) ; CHECK-NEXT: vwadd.vx v8, v12, a1 ; CHECK-NEXT: ret @@ -581,7 +581,7 @@ define <8 x i64> @vwadd_vx_v8i64(<8 x i32>* %x, i32 %y) { ; CHECK-LABEL: vwadd_vx_v8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v12, (a0) ; CHECK-NEXT: vwadd.vx v8, v12, a1 ; CHECK-NEXT: ret @@ -598,7 +598,7 @@ ; CHECK-LABEL: vwadd_vx_v64i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 64 -; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vwadd.vx v8, v16, a1 ; CHECK-NEXT: ret @@ -615,7 +615,7 @@ ; CHECK-LABEL: vwadd_vx_v32i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vwadd.vx v8, v16, a1 ; CHECK-NEXT: ret @@ -631,7 +631,7 @@ define <16 x i64> @vwadd_vx_v16i64(<16 x i32>* %x, i32 %y) { ; CHECK-LABEL: vwadd_vx_v16i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vwadd.vx v8, v16, a1 ; CHECK-NEXT: ret @@ -647,7 +647,7 @@ define <8 x i16> @vwadd_vx_v8i16_i8(<8 x i8>* %x, i8* %y) { ; CHECK-LABEL: vwadd_vx_v8i16_i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: lb a0, 0(a1) ; CHECK-NEXT: vwadd.vx v8, v9, a0 @@ -665,7 +665,7 @@ define <8 x i16> @vwadd_vx_v8i16_i16(<8 x i8>* %x, i16* %y) { ; CHECK-LABEL: vwadd_vx_v8i16_i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vlse16.v v8, (a1), zero ; CHECK-NEXT: vwadd.wv v8, v8, v9 @@ -682,7 +682,7 @@ define <4 x i32> @vwadd_vx_v4i32_i8(<4 x i16>* %x, i8* %y) { ; CHECK-LABEL: vwadd_vx_v4i32_i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: lb a0, 0(a1) ; CHECK-NEXT: vwadd.vx v8, v9, a0 @@ -700,7 +700,7 @@ define <4 x i32> @vwadd_vx_v4i32_i16(<4 x i16>* %x, i16* %y) { ; CHECK-LABEL: vwadd_vx_v4i32_i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: lh a0, 0(a1) ; CHECK-NEXT: vwadd.vx v8, v9, a0 @@ -718,7 +718,7 @@ define <4 x i32> @vwadd_vx_v4i32_i32(<4 x i16>* %x, i32* %y) { ; CHECK-LABEL: vwadd_vx_v4i32_i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; 
CHECK-NEXT: vlse32.v v8, (a1), zero ; CHECK-NEXT: vwadd.wv v8, v8, v9 @@ -736,7 +736,7 @@ ; RV32-LABEL: vwadd_vx_v2i64_i8: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32-NEXT: lb a1, 0(a1) ; RV32-NEXT: vle32.v v9, (a0) ; RV32-NEXT: srai a0, a1, 31 @@ -750,7 +750,7 @@ ; ; RV64-LABEL: vwadd_vx_v2i64_i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV64-NEXT: vle32.v v9, (a0) ; RV64-NEXT: lb a0, 0(a1) ; RV64-NEXT: vwadd.vx v8, v9, a0 @@ -769,7 +769,7 @@ ; RV32-LABEL: vwadd_vx_v2i64_i16: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32-NEXT: lh a1, 0(a1) ; RV32-NEXT: vle32.v v9, (a0) ; RV32-NEXT: srai a0, a1, 31 @@ -783,7 +783,7 @@ ; ; RV64-LABEL: vwadd_vx_v2i64_i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV64-NEXT: vle32.v v9, (a0) ; RV64-NEXT: lh a0, 0(a1) ; RV64-NEXT: vwadd.vx v8, v9, a0 @@ -802,7 +802,7 @@ ; RV32-LABEL: vwadd_vx_v2i64_i32: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32-NEXT: lw a1, 0(a1) ; RV32-NEXT: vle32.v v9, (a0) ; RV32-NEXT: srai a0, a1, 31 @@ -816,7 +816,7 @@ ; ; RV64-LABEL: vwadd_vx_v2i64_i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV64-NEXT: vle32.v v9, (a0) ; RV64-NEXT: lw a0, 0(a1) ; RV64-NEXT: vwadd.vx v8, v9, a0 @@ -835,7 +835,7 @@ ; RV32-LABEL: vwadd_vx_v2i64_i64: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32-NEXT: lw a2, 4(a1) ; RV32-NEXT: lw a1, 0(a1) ; RV32-NEXT: vle32.v v9, (a0) @@ -849,7 +849,7 @@ ; ; RV64-LABEL: vwadd_vx_v2i64_i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV64-NEXT: vle32.v v9, (a0) ; RV64-NEXT: vlse64.v v8, (a1), zero ; RV64-NEXT: vwadd.wv v8, v8, v9 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll @@ -5,7 +5,7 @@ define <2 x i16> @vwaddu_v2i16(<2 x i8>* %x, <2 x i8>* %y) { ; CHECK-LABEL: vwaddu_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vle8.v v10, (a1) ; CHECK-NEXT: vwaddu.vv v8, v9, v10 @@ -21,7 +21,7 @@ define <4 x i16> @vwaddu_v4i16(<4 x i8>* %x, <4 x i8>* %y) { ; CHECK-LABEL: vwaddu_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vle8.v v10, (a1) ; CHECK-NEXT: vwaddu.vv v8, v9, v10 @@ -37,7 +37,7 @@ define <2 x i32> @vwaddu_v2i32(<2 x i16>* %x, <2 x i16>* %y) { ; CHECK-LABEL: vwaddu_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vle16.v v10, (a1) ; CHECK-NEXT: vwaddu.vv v8, v9, v10 @@ -53,7 +53,7 @@ define <8 x i16> @vwaddu_v8i16(<8 x i8>* %x, <8 x i8>* %y) { ; CHECK-LABEL: 
vwaddu_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vle8.v v10, (a1) ; CHECK-NEXT: vwaddu.vv v8, v9, v10 @@ -69,7 +69,7 @@ define <4 x i32> @vwaddu_v4i32(<4 x i16>* %x, <4 x i16>* %y) { ; CHECK-LABEL: vwaddu_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vle16.v v10, (a1) ; CHECK-NEXT: vwaddu.vv v8, v9, v10 @@ -85,7 +85,7 @@ define <2 x i64> @vwaddu_v2i64(<2 x i32>* %x, <2 x i32>* %y) { ; CHECK-LABEL: vwaddu_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vle32.v v10, (a1) ; CHECK-NEXT: vwaddu.vv v8, v9, v10 @@ -101,7 +101,7 @@ define <16 x i16> @vwaddu_v16i16(<16 x i8>* %x, <16 x i8>* %y) { ; CHECK-LABEL: vwaddu_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v10, (a0) ; CHECK-NEXT: vle8.v v11, (a1) ; CHECK-NEXT: vwaddu.vv v8, v10, v11 @@ -117,7 +117,7 @@ define <8 x i32> @vwaddu_v8i32(<8 x i16>* %x, <8 x i16>* %y) { ; CHECK-LABEL: vwaddu_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v10, (a0) ; CHECK-NEXT: vle16.v v11, (a1) ; CHECK-NEXT: vwaddu.vv v8, v10, v11 @@ -133,7 +133,7 @@ define <4 x i64> @vwaddu_v4i64(<4 x i32>* %x, <4 x i32>* %y) { ; CHECK-LABEL: vwaddu_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v10, (a0) ; CHECK-NEXT: vle32.v v11, (a1) ; CHECK-NEXT: vwaddu.vv v8, v10, v11 @@ -150,7 +150,7 @@ ; CHECK-LABEL: vwaddu_v32i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vle8.v v12, (a0) ; CHECK-NEXT: vle8.v v14, (a1) ; CHECK-NEXT: vwaddu.vv v8, v12, v14 @@ -166,7 +166,7 @@ define <16 x i32> @vwaddu_v16i32(<16 x i16>* %x, <16 x i16>* %y) { ; CHECK-LABEL: vwaddu_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v12, (a0) ; CHECK-NEXT: vle16.v v14, (a1) ; CHECK-NEXT: vwaddu.vv v8, v12, v14 @@ -182,7 +182,7 @@ define <8 x i64> @vwaddu_v8i64(<8 x i32>* %x, <8 x i32>* %y) { ; CHECK-LABEL: vwaddu_v8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v12, (a0) ; CHECK-NEXT: vle32.v v14, (a1) ; CHECK-NEXT: vwaddu.vv v8, v12, v14 @@ -199,7 +199,7 @@ ; CHECK-LABEL: vwaddu_v64i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 64 -; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vle8.v v20, (a1) ; CHECK-NEXT: vwaddu.vv v8, v16, v20 @@ -216,7 +216,7 @@ ; CHECK-LABEL: vwaddu_v32i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vle16.v v20, (a1) ; CHECK-NEXT: vwaddu.vv v8, v16, v20 @@ -232,7 +232,7 @@ define <16 x i64> @vwaddu_v16i64(<16 x i32>* %x, <16 x i32>* %y) { ; CHECK-LABEL: vwaddu_v16i64: ; CHECK: # %bb.0: -; CHECK-NEXT: 
vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vle32.v v20, (a1) ; CHECK-NEXT: vwaddu.vv v8, v16, v20 @@ -253,16 +253,16 @@ ; CHECK-NEXT: slli a2, a2, 3 ; CHECK-NEXT: sub sp, sp, a2 ; CHECK-NEXT: li a2, 128 -; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vle8.v v24, (a1) ; CHECK-NEXT: li a0, 64 -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v16, a0 ; CHECK-NEXT: addi a1, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: vslidedown.vx v0, v24, a0 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwaddu.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload @@ -288,16 +288,16 @@ ; CHECK-NEXT: slli a2, a2, 3 ; CHECK-NEXT: sub sp, sp, a2 ; CHECK-NEXT: li a2, 64 -; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vle16.v v24, (a1) ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v16, a0 ; CHECK-NEXT: addi a1, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: vslidedown.vx v0, v24, a0 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwaddu.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload @@ -323,15 +323,15 @@ ; CHECK-NEXT: slli a2, a2, 3 ; CHECK-NEXT: sub sp, sp, a2 ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vle32.v v24, (a1) -; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v16, 16 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill ; CHECK-NEXT: vslidedown.vi v0, v24, 16 -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vwaddu.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload @@ -352,7 +352,7 @@ define <2 x i32> @vwaddu_v2i32_v2i8(<2 x i8>* %x, <2 x i8>* %y) { ; CHECK-LABEL: vwaddu_v2i32_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle8.v v8, (a1) ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vzext.vf2 v10, v8 @@ -370,7 +370,7 @@ define <4 x i32> @vwaddu_v4i32_v4i8_v4i16(<4 x i8>* %x, <4 x i16>* %y) { ; CHECK-LABEL: vwaddu_v4i32_v4i8_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vzext.vf2 v10, v8 @@ -387,7 +387,7 @@ define <4 x i64> @vwaddu_v4i64_v4i32_v4i8(<4 x i32>* %x, <4 x i8>* %y) { ; CHECK-LABEL: vwaddu_v4i64_v4i32_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a1) ; CHECK-NEXT: vle32.v v10, (a0) ; CHECK-NEXT: vzext.vf4 v11, v8 @@ -404,7 +404,7 @@ define <2 
x i16> @vwaddu_vx_v2i16(<2 x i8>* %x, i8 %y) { ; CHECK-LABEL: vwaddu_vx_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vwaddu.vx v8, v9, a1 ; CHECK-NEXT: ret @@ -420,7 +420,7 @@ define <4 x i16> @vwaddu_vx_v4i16(<4 x i8>* %x, i8 %y) { ; CHECK-LABEL: vwaddu_vx_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vwaddu.vx v8, v9, a1 ; CHECK-NEXT: ret @@ -436,7 +436,7 @@ define <2 x i32> @vwaddu_vx_v2i32(<2 x i16>* %x, i16 %y) { ; CHECK-LABEL: vwaddu_vx_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vwaddu.vx v8, v9, a1 ; CHECK-NEXT: ret @@ -452,7 +452,7 @@ define <8 x i16> @vwaddu_vx_v8i16(<8 x i8>* %x, i8 %y) { ; CHECK-LABEL: vwaddu_vx_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vwaddu.vx v8, v9, a1 ; CHECK-NEXT: ret @@ -468,7 +468,7 @@ define <4 x i32> @vwaddu_vx_v4i32(<4 x i16>* %x, i16 %y) { ; CHECK-LABEL: vwaddu_vx_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vwaddu.vx v8, v9, a1 ; CHECK-NEXT: ret @@ -484,7 +484,7 @@ define <2 x i64> @vwaddu_vx_v2i64(<2 x i32>* %x, i32 %y) { ; CHECK-LABEL: vwaddu_vx_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vwaddu.vx v8, v9, a1 ; CHECK-NEXT: ret @@ -500,7 +500,7 @@ define <16 x i16> @vwaddu_vx_v16i16(<16 x i8>* %x, i8 %y) { ; CHECK-LABEL: vwaddu_vx_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v10, (a0) ; CHECK-NEXT: vwaddu.vx v8, v10, a1 ; CHECK-NEXT: ret @@ -516,7 +516,7 @@ define <8 x i32> @vwaddu_vx_v8i32(<8 x i16>* %x, i16 %y) { ; CHECK-LABEL: vwaddu_vx_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v10, (a0) ; CHECK-NEXT: vwaddu.vx v8, v10, a1 ; CHECK-NEXT: ret @@ -532,7 +532,7 @@ define <4 x i64> @vwaddu_vx_v4i64(<4 x i32>* %x, i32 %y) { ; CHECK-LABEL: vwaddu_vx_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v10, (a0) ; CHECK-NEXT: vwaddu.vx v8, v10, a1 ; CHECK-NEXT: ret @@ -549,7 +549,7 @@ ; CHECK-LABEL: vwaddu_vx_v32i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vle8.v v12, (a0) ; CHECK-NEXT: vwaddu.vx v8, v12, a1 ; CHECK-NEXT: ret @@ -565,7 +565,7 @@ define <16 x i32> @vwaddu_vx_v16i32(<16 x i16>* %x, i16 %y) { ; CHECK-LABEL: vwaddu_vx_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v12, (a0) ; CHECK-NEXT: vwaddu.vx v8, v12, a1 ; CHECK-NEXT: ret @@ -581,7 +581,7 @@ define <8 x i64> @vwaddu_vx_v8i64(<8 x i32>* %x, i32 %y) { ; CHECK-LABEL: vwaddu_vx_v8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; 
CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v12, (a0) ; CHECK-NEXT: vwaddu.vx v8, v12, a1 ; CHECK-NEXT: ret @@ -598,7 +598,7 @@ ; CHECK-LABEL: vwaddu_vx_v64i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 64 -; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vwaddu.vx v8, v16, a1 ; CHECK-NEXT: ret @@ -615,7 +615,7 @@ ; CHECK-LABEL: vwaddu_vx_v32i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vwaddu.vx v8, v16, a1 ; CHECK-NEXT: ret @@ -631,7 +631,7 @@ define <16 x i64> @vwaddu_vx_v16i64(<16 x i32>* %x, i32 %y) { ; CHECK-LABEL: vwaddu_vx_v16i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vwaddu.vx v8, v16, a1 ; CHECK-NEXT: ret @@ -647,7 +647,7 @@ define <8 x i16> @vwaddu_vx_v8i16_i8(<8 x i8>* %x, i8* %y) { ; CHECK-LABEL: vwaddu_vx_v8i16_i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: lbu a0, 0(a1) ; CHECK-NEXT: vwaddu.vx v8, v9, a0 @@ -665,7 +665,7 @@ define <8 x i16> @vwaddu_vx_v8i16_i16(<8 x i8>* %x, i16* %y) { ; CHECK-LABEL: vwaddu_vx_v8i16_i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vlse16.v v8, (a1), zero ; CHECK-NEXT: vwaddu.wv v8, v8, v9 @@ -682,7 +682,7 @@ define <4 x i32> @vwaddu_vx_v4i32_i8(<4 x i16>* %x, i8* %y) { ; CHECK-LABEL: vwaddu_vx_v4i32_i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: lbu a0, 0(a1) ; CHECK-NEXT: vwaddu.vx v8, v9, a0 @@ -700,7 +700,7 @@ define <4 x i32> @vwaddu_vx_v4i32_i16(<4 x i16>* %x, i16* %y) { ; CHECK-LABEL: vwaddu_vx_v4i32_i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: lhu a0, 0(a1) ; CHECK-NEXT: vwaddu.vx v8, v9, a0 @@ -718,7 +718,7 @@ define <4 x i32> @vwaddu_vx_v4i32_i32(<4 x i16>* %x, i32* %y) { ; CHECK-LABEL: vwaddu_vx_v4i32_i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vlse32.v v8, (a1), zero ; CHECK-NEXT: vwaddu.wv v8, v8, v9 @@ -736,7 +736,7 @@ ; RV32-LABEL: vwaddu_vx_v2i64_i8: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32-NEXT: lbu a1, 0(a1) ; RV32-NEXT: vle32.v v9, (a0) ; RV32-NEXT: sw zero, 12(sp) @@ -749,7 +749,7 @@ ; ; RV64-LABEL: vwaddu_vx_v2i64_i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV64-NEXT: vle32.v v9, (a0) ; RV64-NEXT: lbu a0, 0(a1) ; RV64-NEXT: vwaddu.vx v8, v9, a0 @@ -768,7 +768,7 @@ ; RV32-LABEL: vwaddu_vx_v2i64_i16: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32-NEXT: lhu a1, 0(a1) ; RV32-NEXT: vle32.v v9, (a0) ; RV32-NEXT: sw zero, 12(sp) @@ -781,7 +781,7 @@ ; ; RV64-LABEL: 
vwaddu_vx_v2i64_i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV64-NEXT: vle32.v v9, (a0) ; RV64-NEXT: lhu a0, 0(a1) ; RV64-NEXT: vwaddu.vx v8, v9, a0 @@ -800,7 +800,7 @@ ; RV32-LABEL: vwaddu_vx_v2i64_i32: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32-NEXT: lw a1, 0(a1) ; RV32-NEXT: vle32.v v9, (a0) ; RV32-NEXT: sw zero, 12(sp) @@ -813,7 +813,7 @@ ; ; RV64-LABEL: vwaddu_vx_v2i64_i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV64-NEXT: vle32.v v9, (a0) ; RV64-NEXT: lwu a0, 0(a1) ; RV64-NEXT: vwaddu.vx v8, v9, a0 @@ -832,7 +832,7 @@ ; RV32-LABEL: vwaddu_vx_v2i64_i64: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32-NEXT: lw a2, 4(a1) ; RV32-NEXT: lw a1, 0(a1) ; RV32-NEXT: vle32.v v9, (a0) @@ -846,7 +846,7 @@ ; ; RV64-LABEL: vwaddu_vx_v2i64_i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV64-NEXT: vle32.v v9, (a0) ; RV64-NEXT: vlse64.v v8, (a1), zero ; RV64-NEXT: vwaddu.wv v8, v8, v9 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmacc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmacc.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmacc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmacc.ll @@ -5,7 +5,7 @@ define <2 x i16> @vwmacc_v2i16(<2 x i8>* %x, <2 x i8>* %y, <2 x i16> %z) { ; CHECK-LABEL: vwmacc_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vle8.v v10, (a1) ; CHECK-NEXT: vwmacc.vv v8, v9, v10 @@ -22,7 +22,7 @@ define <4 x i16> @vwmacc_v4i16(<4 x i8>* %x, <4 x i8>* %y, <4 x i16> %z) { ; CHECK-LABEL: vwmacc_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vle8.v v10, (a1) ; CHECK-NEXT: vwmacc.vv v8, v9, v10 @@ -39,7 +39,7 @@ define <2 x i32> @vwmacc_v2i32(<2 x i16>* %x, <2 x i16>* %y, <2 x i32> %z) { ; CHECK-LABEL: vwmacc_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vle16.v v10, (a1) ; CHECK-NEXT: vwmacc.vv v8, v9, v10 @@ -56,7 +56,7 @@ define <8 x i16> @vwmacc_v8i16(<8 x i8>* %x, <8 x i8>* %y, <8 x i16> %z) { ; CHECK-LABEL: vwmacc_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vle8.v v10, (a1) ; CHECK-NEXT: vwmacc.vv v8, v9, v10 @@ -73,7 +73,7 @@ define <4 x i32> @vwmacc_v4i32(<4 x i16>* %x, <4 x i16>* %y, <4 x i32> %z) { ; CHECK-LABEL: vwmacc_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vle16.v v10, (a1) ; CHECK-NEXT: vwmacc.vv v8, v9, v10 @@ -90,7 +90,7 @@ define <2 x i64> @vwmacc_v2i64(<2 x i32>* %x, <2 x i32>* %y, <2 x i64> %z) { ; CHECK-LABEL: vwmacc_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vle32.v v10, (a1) 
; CHECK-NEXT: vwmacc.vv v8, v9, v10 @@ -107,7 +107,7 @@ define <16 x i16> @vwmacc_v16i16(<16 x i8>* %x, <16 x i8>* %y, <16 x i16> %z) { ; CHECK-LABEL: vwmacc_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v10, (a0) ; CHECK-NEXT: vle8.v v11, (a1) ; CHECK-NEXT: vwmacc.vv v8, v10, v11 @@ -124,7 +124,7 @@ define <8 x i32> @vwmacc_v8i32(<8 x i16>* %x, <8 x i16>* %y, <8 x i32> %z) { ; CHECK-LABEL: vwmacc_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v10, (a0) ; CHECK-NEXT: vle16.v v11, (a1) ; CHECK-NEXT: vwmacc.vv v8, v10, v11 @@ -141,7 +141,7 @@ define <4 x i64> @vwmacc_v4i64(<4 x i32>* %x, <4 x i32>* %y, <4 x i64> %z) { ; CHECK-LABEL: vwmacc_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v10, (a0) ; CHECK-NEXT: vle32.v v11, (a1) ; CHECK-NEXT: vwmacc.vv v8, v10, v11 @@ -159,7 +159,7 @@ ; CHECK-LABEL: vwmacc_v32i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vle8.v v12, (a0) ; CHECK-NEXT: vle8.v v14, (a1) ; CHECK-NEXT: vwmacc.vv v8, v12, v14 @@ -176,7 +176,7 @@ define <16 x i32> @vwmacc_v16i32(<16 x i16>* %x, <16 x i16>* %y, <16 x i32> %z) { ; CHECK-LABEL: vwmacc_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v12, (a0) ; CHECK-NEXT: vle16.v v14, (a1) ; CHECK-NEXT: vwmacc.vv v8, v12, v14 @@ -193,7 +193,7 @@ define <8 x i64> @vwmacc_v8i64(<8 x i32>* %x, <8 x i32>* %y, <8 x i64> %z) { ; CHECK-LABEL: vwmacc_v8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v12, (a0) ; CHECK-NEXT: vle32.v v14, (a1) ; CHECK-NEXT: vwmacc.vv v8, v12, v14 @@ -211,7 +211,7 @@ ; CHECK-LABEL: vwmacc_v64i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 64 -; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vle8.v v20, (a1) ; CHECK-NEXT: vwmacc.vv v8, v16, v20 @@ -229,7 +229,7 @@ ; CHECK-LABEL: vwmacc_v32i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vle16.v v20, (a1) ; CHECK-NEXT: vwmacc.vv v8, v16, v20 @@ -246,7 +246,7 @@ define <16 x i64> @vwmacc_v16i64(<16 x i32>* %x, <16 x i32>* %y, <16 x i64> %z) { ; CHECK-LABEL: vwmacc_v16i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vle32.v v20, (a1) ; CHECK-NEXT: vwmacc.vv v8, v16, v20 @@ -263,7 +263,7 @@ define <2 x i16> @vwmacc_vx_v2i16(<2 x i8>* %x, i8 %y, <2 x i16> %z) { ; CHECK-LABEL: vwmacc_vx_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vwmacc.vx v8, a1, v9 ; CHECK-NEXT: ret @@ -280,7 +280,7 @@ define <4 x i16> @vwmacc_vx_v4i16(<4 x i8>* %x, i8 %y, <4 x i16> %z) { ; CHECK-LABEL: vwmacc_vx_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: 
vle8.v v9, (a0) ; CHECK-NEXT: vwmacc.vx v8, a1, v9 ; CHECK-NEXT: ret @@ -297,7 +297,7 @@ define <2 x i32> @vwmacc_vx_v2i32(<2 x i16>* %x, i16 %y, <2 x i32> %z) { ; CHECK-LABEL: vwmacc_vx_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vwmacc.vx v8, a1, v9 ; CHECK-NEXT: ret @@ -314,7 +314,7 @@ define <8 x i16> @vwmacc_vx_v8i16(<8 x i8>* %x, i8 %y, <8 x i16> %z) { ; CHECK-LABEL: vwmacc_vx_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vwmacc.vx v8, a1, v9 ; CHECK-NEXT: ret @@ -331,7 +331,7 @@ define <4 x i32> @vwmacc_vx_v4i32(<4 x i16>* %x, i16 %y, <4 x i32> %z) { ; CHECK-LABEL: vwmacc_vx_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vwmacc.vx v8, a1, v9 ; CHECK-NEXT: ret @@ -348,7 +348,7 @@ define <2 x i64> @vwmacc_vx_v2i64(<2 x i32>* %x, i32 %y, <2 x i64> %z) { ; CHECK-LABEL: vwmacc_vx_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vwmacc.vx v8, a1, v9 ; CHECK-NEXT: ret @@ -365,7 +365,7 @@ define <16 x i16> @vwmacc_vx_v16i16(<16 x i8>* %x, i8 %y, <16 x i16> %z) { ; CHECK-LABEL: vwmacc_vx_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v10, (a0) ; CHECK-NEXT: vwmacc.vx v8, a1, v10 ; CHECK-NEXT: ret @@ -382,7 +382,7 @@ define <8 x i32> @vwmacc_vx_v8i32(<8 x i16>* %x, i16 %y, <8 x i32> %z) { ; CHECK-LABEL: vwmacc_vx_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v10, (a0) ; CHECK-NEXT: vwmacc.vx v8, a1, v10 ; CHECK-NEXT: ret @@ -399,7 +399,7 @@ define <4 x i64> @vwmacc_vx_v4i64(<4 x i32>* %x, i32 %y, <4 x i64> %z) { ; CHECK-LABEL: vwmacc_vx_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v10, (a0) ; CHECK-NEXT: vwmacc.vx v8, a1, v10 ; CHECK-NEXT: ret @@ -417,7 +417,7 @@ ; CHECK-LABEL: vwmacc_vx_v32i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vle8.v v12, (a0) ; CHECK-NEXT: vwmacc.vx v8, a1, v12 ; CHECK-NEXT: ret @@ -434,7 +434,7 @@ define <16 x i32> @vwmacc_vx_v16i32(<16 x i16>* %x, i16 %y, <16 x i32> %z) { ; CHECK-LABEL: vwmacc_vx_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v12, (a0) ; CHECK-NEXT: vwmacc.vx v8, a1, v12 ; CHECK-NEXT: ret @@ -451,7 +451,7 @@ define <8 x i64> @vwmacc_vx_v8i64(<8 x i32>* %x, i32 %y, <8 x i64> %z) { ; CHECK-LABEL: vwmacc_vx_v8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v12, (a0) ; CHECK-NEXT: vwmacc.vx v8, a1, v12 ; CHECK-NEXT: ret @@ -469,7 +469,7 @@ ; CHECK-LABEL: vwmacc_vx_v64i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 64 -; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vwmacc.vx v8, a1, v16 ; 
CHECK-NEXT: ret @@ -487,7 +487,7 @@ ; CHECK-LABEL: vwmacc_vx_v32i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vwmacc.vx v8, a1, v16 ; CHECK-NEXT: ret @@ -504,7 +504,7 @@ define <16 x i64> @vwmacc_vx_v16i64(<16 x i32>* %x, i32 %y, <16 x i64> %z) { ; CHECK-LABEL: vwmacc_vx_v16i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vwmacc.vx v8, a1, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmaccsu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmaccsu.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmaccsu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmaccsu.ll @@ -5,7 +5,7 @@ define <2 x i16> @vwmaccsu_v2i16(<2 x i8>* %x, <2 x i8>* %y, <2 x i16> %z) { ; CHECK-LABEL: vwmaccsu_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vle8.v v10, (a1) ; CHECK-NEXT: vwmaccsu.vv v8, v9, v10 @@ -22,7 +22,7 @@ define <4 x i16> @vwmaccsu_v4i16(<4 x i8>* %x, <4 x i8>* %y, <4 x i16> %z) { ; CHECK-LABEL: vwmaccsu_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vle8.v v10, (a1) ; CHECK-NEXT: vwmaccsu.vv v8, v9, v10 @@ -39,7 +39,7 @@ define <2 x i32> @vwmaccsu_v2i32(<2 x i16>* %x, <2 x i16>* %y, <2 x i32> %z) { ; CHECK-LABEL: vwmaccsu_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vle16.v v10, (a1) ; CHECK-NEXT: vwmaccsu.vv v8, v9, v10 @@ -56,7 +56,7 @@ define <8 x i16> @vwmaccsu_v8i16(<8 x i8>* %x, <8 x i8>* %y, <8 x i16> %z) { ; CHECK-LABEL: vwmaccsu_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vle8.v v10, (a1) ; CHECK-NEXT: vwmaccsu.vv v8, v9, v10 @@ -73,7 +73,7 @@ define <4 x i32> @vwmaccsu_v4i32(<4 x i16>* %x, <4 x i16>* %y, <4 x i32> %z) { ; CHECK-LABEL: vwmaccsu_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vle16.v v10, (a1) ; CHECK-NEXT: vwmaccsu.vv v8, v9, v10 @@ -90,7 +90,7 @@ define <2 x i64> @vwmaccsu_v2i64(<2 x i32>* %x, <2 x i32>* %y, <2 x i64> %z) { ; CHECK-LABEL: vwmaccsu_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vle32.v v10, (a1) ; CHECK-NEXT: vwmaccsu.vv v8, v9, v10 @@ -107,7 +107,7 @@ define <16 x i16> @vwmaccsu_v16i16(<16 x i8>* %x, <16 x i8>* %y, <16 x i16> %z) { ; CHECK-LABEL: vwmaccsu_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v10, (a0) ; CHECK-NEXT: vle8.v v11, (a1) ; CHECK-NEXT: vwmaccsu.vv v8, v10, v11 @@ -124,7 +124,7 @@ define <8 x i32> @vwmaccsu_v8i32(<8 x i16>* %x, <8 x i16>* %y, <8 x i32> %z) { ; CHECK-LABEL: vwmaccsu_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v 
v10, (a0) ; CHECK-NEXT: vle16.v v11, (a1) ; CHECK-NEXT: vwmaccsu.vv v8, v10, v11 @@ -141,7 +141,7 @@ define <4 x i64> @vwmaccsu_v4i64(<4 x i32>* %x, <4 x i32>* %y, <4 x i64> %z) { ; CHECK-LABEL: vwmaccsu_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v10, (a0) ; CHECK-NEXT: vle32.v v11, (a1) ; CHECK-NEXT: vwmaccsu.vv v8, v10, v11 @@ -159,7 +159,7 @@ ; CHECK-LABEL: vwmaccsu_v32i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vle8.v v12, (a0) ; CHECK-NEXT: vle8.v v14, (a1) ; CHECK-NEXT: vwmaccsu.vv v8, v12, v14 @@ -176,7 +176,7 @@ define <16 x i32> @vwmaccsu_v16i32(<16 x i16>* %x, <16 x i16>* %y, <16 x i32> %z) { ; CHECK-LABEL: vwmaccsu_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v12, (a0) ; CHECK-NEXT: vle16.v v14, (a1) ; CHECK-NEXT: vwmaccsu.vv v8, v12, v14 @@ -193,7 +193,7 @@ define <8 x i64> @vwmaccsu_v8i64(<8 x i32>* %x, <8 x i32>* %y, <8 x i64> %z) { ; CHECK-LABEL: vwmaccsu_v8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v12, (a0) ; CHECK-NEXT: vle32.v v14, (a1) ; CHECK-NEXT: vwmaccsu.vv v8, v12, v14 @@ -211,7 +211,7 @@ ; CHECK-LABEL: vwmaccsu_v64i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 64 -; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vle8.v v20, (a1) ; CHECK-NEXT: vwmaccsu.vv v8, v16, v20 @@ -229,7 +229,7 @@ ; CHECK-LABEL: vwmaccsu_v32i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vle16.v v20, (a1) ; CHECK-NEXT: vwmaccsu.vv v8, v16, v20 @@ -246,7 +246,7 @@ define <16 x i64> @vwmaccsu_v16i64(<16 x i32>* %x, <16 x i32>* %y, <16 x i64> %z) { ; CHECK-LABEL: vwmaccsu_v16i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vle32.v v20, (a1) ; CHECK-NEXT: vwmaccsu.vv v8, v16, v20 @@ -263,7 +263,7 @@ define <2 x i16> @vwmaccsu_vx_v2i16(<2 x i8>* %x, i8 %y, <2 x i16> %z) { ; CHECK-LABEL: vwmaccsu_vx_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vwmaccsu.vx v8, a1, v9 ; CHECK-NEXT: ret @@ -280,7 +280,7 @@ define <4 x i16> @vwmaccsu_vx_v4i16(<4 x i8>* %x, i8 %y, <4 x i16> %z) { ; CHECK-LABEL: vwmaccsu_vx_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vwmaccsu.vx v8, a1, v9 ; CHECK-NEXT: ret @@ -297,7 +297,7 @@ define <2 x i32> @vwmaccsu_vx_v2i32(<2 x i16>* %x, i16 %y, <2 x i32> %z) { ; CHECK-LABEL: vwmaccsu_vx_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vwmaccsu.vx v8, a1, v9 ; CHECK-NEXT: ret @@ -314,7 +314,7 @@ define <8 x i16> @vwmaccsu_vx_v8i16(<8 x i8>* %x, i8 %y, <8 x i16> %z) { ; CHECK-LABEL: vwmaccsu_vx_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu 
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: vwmaccsu.vx v8, a1, v9
 ; CHECK-NEXT: ret
@@ -331,7 +331,7 @@
 define <4 x i32> @vwmaccsu_vx_v4i32(<4 x i16>* %x, i16 %y, <4 x i32> %z) {
 ; CHECK-LABEL: vwmaccsu_vx_v4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT: vle16.v v9, (a0)
 ; CHECK-NEXT: vwmaccsu.vx v8, a1, v9
 ; CHECK-NEXT: ret
@@ -348,7 +348,7 @@
 define <2 x i64> @vwmaccsu_vx_v2i64(<2 x i32>* %x, i32 %y, <2 x i64> %z) {
 ; CHECK-LABEL: vwmaccsu_vx_v2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT: vle32.v v9, (a0)
 ; CHECK-NEXT: vwmaccsu.vx v8, a1, v9
 ; CHECK-NEXT: ret
@@ -365,7 +365,7 @@
 define <16 x i16> @vwmaccsu_vx_v16i16(<16 x i8>* %x, i8 %y, <16 x i16> %z) {
 ; CHECK-LABEL: vwmaccsu_vx_v16i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT: vle8.v v10, (a0)
 ; CHECK-NEXT: vwmaccsu.vx v8, a1, v10
 ; CHECK-NEXT: ret
@@ -382,7 +382,7 @@
 define <8 x i32> @vwmaccsu_vx_v8i32(<8 x i16>* %x, i16 %y, <8 x i32> %z) {
 ; CHECK-LABEL: vwmaccsu_vx_v8i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT: vle16.v v10, (a0)
 ; CHECK-NEXT: vwmaccsu.vx v8, a1, v10
 ; CHECK-NEXT: ret
@@ -399,7 +399,7 @@
 define <4 x i64> @vwmaccsu_vx_v4i64(<4 x i32>* %x, i32 %y, <4 x i64> %z) {
 ; CHECK-LABEL: vwmaccsu_vx_v4i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT: vle32.v v10, (a0)
 ; CHECK-NEXT: vwmaccsu.vx v8, a1, v10
 ; CHECK-NEXT: ret
@@ -417,7 +417,7 @@
 ; CHECK-LABEL: vwmaccsu_vx_v32i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a2, 32
-; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
 ; CHECK-NEXT: vle8.v v12, (a0)
 ; CHECK-NEXT: vwmaccsu.vx v8, a1, v12
 ; CHECK-NEXT: ret
@@ -434,7 +434,7 @@
 define <16 x i32> @vwmaccsu_vx_v16i32(<16 x i16>* %x, i16 %y, <16 x i32> %z) {
 ; CHECK-LABEL: vwmaccsu_vx_v16i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
 ; CHECK-NEXT: vle16.v v12, (a0)
 ; CHECK-NEXT: vwmaccsu.vx v8, a1, v12
 ; CHECK-NEXT: ret
@@ -451,7 +451,7 @@
 define <8 x i64> @vwmaccsu_vx_v8i64(<8 x i32>* %x, i32 %y, <8 x i64> %z) {
 ; CHECK-LABEL: vwmaccsu_vx_v8i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT: vle32.v v12, (a0)
 ; CHECK-NEXT: vwmaccsu.vx v8, a1, v12
 ; CHECK-NEXT: ret
@@ -469,7 +469,7 @@
 ; CHECK-LABEL: vwmaccsu_vx_v64i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a2, 64
-; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma
 ; CHECK-NEXT: vle8.v v16, (a0)
 ; CHECK-NEXT: vwmaccsu.vx v8, a1, v16
 ; CHECK-NEXT: ret
@@ -487,7 +487,7 @@
 ; CHECK-LABEL: vwmaccsu_vx_v32i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a2, 32
-; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
 ; CHECK-NEXT: vle16.v v16, (a0)
 ; CHECK-NEXT: vwmaccsu.vx v8, a1, v16
 ; CHECK-NEXT: ret
@@ -504,7 +504,7 @@
 define <16 x i64> @vwmaccsu_vx_v16i64(<16 x i32>* %x, i32 %y, <16 x i64> %z) {
 ; CHECK-LABEL: vwmaccsu_vx_v16i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT: vle32.v v16, (a0)
 ; CHECK-NEXT: vwmaccsu.vx v8, a1, v16
 ; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmaccu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmaccu.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmaccu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmaccu.ll
@@ -5,7 +5,7 @@
 define <2 x i16> @vwmaccu_v2i16(<2 x i8>* %x, <2 x i8>* %y, <2 x i16> %z) {
 ; CHECK-LABEL: vwmaccu_v2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: vle8.v v10, (a1)
 ; CHECK-NEXT: vwmaccu.vv v8, v9, v10
@@ -22,7 +22,7 @@
 define <4 x i16> @vwmaccu_v4i16(<4 x i8>* %x, <4 x i8>* %y, <4 x i16> %z) {
 ; CHECK-LABEL: vwmaccu_v4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: vle8.v v10, (a1)
 ; CHECK-NEXT: vwmaccu.vv v8, v9, v10
@@ -39,7 +39,7 @@
 define <2 x i32> @vwmaccu_v2i32(<2 x i16>* %x, <2 x i16>* %y, <2 x i32> %z) {
 ; CHECK-LABEL: vwmaccu_v2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
 ; CHECK-NEXT: vle16.v v9, (a0)
 ; CHECK-NEXT: vle16.v v10, (a1)
 ; CHECK-NEXT: vwmaccu.vv v8, v9, v10
@@ -56,7 +56,7 @@
 define <8 x i16> @vwmaccu_v8i16(<8 x i8>* %x, <8 x i8>* %y, <8 x i16> %z) {
 ; CHECK-LABEL: vwmaccu_v8i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: vle8.v v10, (a1)
 ; CHECK-NEXT: vwmaccu.vv v8, v9, v10
@@ -73,7 +73,7 @@
 define <4 x i32> @vwmaccu_v4i32(<4 x i16>* %x, <4 x i16>* %y, <4 x i32> %z) {
 ; CHECK-LABEL: vwmaccu_v4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT: vle16.v v9, (a0)
 ; CHECK-NEXT: vle16.v v10, (a1)
 ; CHECK-NEXT: vwmaccu.vv v8, v9, v10
@@ -90,7 +90,7 @@
 define <2 x i64> @vwmaccu_v2i64(<2 x i32>* %x, <2 x i32>* %y, <2 x i64> %z) {
 ; CHECK-LABEL: vwmaccu_v2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT: vle32.v v9, (a0)
 ; CHECK-NEXT: vle32.v v10, (a1)
 ; CHECK-NEXT: vwmaccu.vv v8, v9, v10
@@ -107,7 +107,7 @@
 define <16 x i16> @vwmaccu_v16i16(<16 x i8>* %x, <16 x i8>* %y, <16 x i16> %z) {
 ; CHECK-LABEL: vwmaccu_v16i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT: vle8.v v10, (a0)
 ; CHECK-NEXT: vle8.v v11, (a1)
 ; CHECK-NEXT: vwmaccu.vv v8, v10, v11
@@ -124,7 +124,7 @@
 define <8 x i32> @vwmaccu_v8i32(<8 x i16>* %x, <8 x i16>* %y, <8 x i32> %z) {
 ; CHECK-LABEL: vwmaccu_v8i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT: vle16.v v10, (a0)
 ; CHECK-NEXT: vle16.v v11, (a1)
 ; CHECK-NEXT: vwmaccu.vv v8, v10, v11
@@ -141,7 +141,7 @@
 define <4 x i64> @vwmaccu_v4i64(<4 x i32>* %x, <4 x i32>* %y, <4 x i64> %z) {
 ; CHECK-LABEL: vwmaccu_v4i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT: vle32.v v10, (a0)
 ; CHECK-NEXT: vle32.v v11, (a1)
 ; CHECK-NEXT: vwmaccu.vv v8, v10, v11
@@ -159,7 +159,7 @@
 ; CHECK-LABEL: vwmaccu_v32i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a2, 32
-; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
 ; CHECK-NEXT: vle8.v v12, (a0)
 ; CHECK-NEXT: vle8.v v14, (a1)
 ; CHECK-NEXT: vwmaccu.vv v8, v12, v14
@@ -176,7 +176,7 @@
 define <16 x i32> @vwmaccu_v16i32(<16 x i16>* %x, <16 x i16>* %y, <16 x i32> %z) {
 ; CHECK-LABEL: vwmaccu_v16i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
 ; CHECK-NEXT: vle16.v v12, (a0)
 ; CHECK-NEXT: vle16.v v14, (a1)
 ; CHECK-NEXT: vwmaccu.vv v8, v12, v14
@@ -193,7 +193,7 @@
 define <8 x i64> @vwmaccu_v8i64(<8 x i32>* %x, <8 x i32>* %y, <8 x i64> %z) {
 ; CHECK-LABEL: vwmaccu_v8i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT: vle32.v v12, (a0)
 ; CHECK-NEXT: vle32.v v14, (a1)
 ; CHECK-NEXT: vwmaccu.vv v8, v12, v14
@@ -211,7 +211,7 @@
 ; CHECK-LABEL: vwmaccu_v64i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a2, 64
-; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma
 ; CHECK-NEXT: vle8.v v16, (a0)
 ; CHECK-NEXT: vle8.v v20, (a1)
 ; CHECK-NEXT: vwmaccu.vv v8, v16, v20
@@ -229,7 +229,7 @@
 ; CHECK-LABEL: vwmaccu_v32i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a2, 32
-; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
 ; CHECK-NEXT: vle16.v v16, (a0)
 ; CHECK-NEXT: vle16.v v20, (a1)
 ; CHECK-NEXT: vwmaccu.vv v8, v16, v20
@@ -246,7 +246,7 @@
 define <16 x i64> @vwmaccu_v16i64(<16 x i32>* %x, <16 x i32>* %y, <16 x i64> %z) {
 ; CHECK-LABEL: vwmaccu_v16i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT: vle32.v v16, (a0)
 ; CHECK-NEXT: vle32.v v20, (a1)
 ; CHECK-NEXT: vwmaccu.vv v8, v16, v20
@@ -263,7 +263,7 @@
 define <2 x i16> @vwmaccu_vx_v2i16(<2 x i8>* %x, i8 %y, <2 x i16> %z) {
 ; CHECK-LABEL: vwmaccu_vx_v2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: vwmaccu.vx v8, a1, v9
 ; CHECK-NEXT: ret
@@ -280,7 +280,7 @@
 define <4 x i16> @vwmaccu_vx_v4i16(<4 x i8>* %x, i8 %y, <4 x i16> %z) {
 ; CHECK-LABEL: vwmaccu_vx_v4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: vwmaccu.vx v8, a1, v9
 ; CHECK-NEXT: ret
@@ -297,7 +297,7 @@
 define <2 x i32> @vwmaccu_vx_v2i32(<2 x i16>* %x, i16 %y, <2 x i32> %z) {
 ; CHECK-LABEL: vwmaccu_vx_v2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
 ; CHECK-NEXT: vle16.v v9, (a0)
 ; CHECK-NEXT: vwmaccu.vx v8, a1, v9
 ; CHECK-NEXT: ret
@@ -314,7 +314,7 @@
 define <8 x i16> @vwmaccu_vx_v8i16(<8 x i8>* %x, i8 %y, <8 x i16> %z) {
 ; CHECK-LABEL: vwmaccu_vx_v8i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: vwmaccu.vx v8, a1, v9
 ; CHECK-NEXT: ret
@@ -331,7 +331,7 @@
 define <4 x i32> @vwmaccu_vx_v4i32(<4 x i16>* %x, i16 %y, <4 x i32> %z) {
 ; CHECK-LABEL: vwmaccu_vx_v4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT: vle16.v v9, (a0)
 ; CHECK-NEXT: vwmaccu.vx v8, a1, v9
 ; CHECK-NEXT: ret
@@ -348,7 +348,7 @@
 define <2 x i64> @vwmaccu_vx_v2i64(<2 x i32>* %x, i32 %y, <2 x i64> %z) {
 ; CHECK-LABEL: vwmaccu_vx_v2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT: vle32.v v9, (a0)
 ; CHECK-NEXT: vwmaccu.vx v8, a1, v9
 ; CHECK-NEXT: ret
@@ -365,7 +365,7 @@
 define <16 x i16> @vwmaccu_vx_v16i16(<16 x i8>* %x, i8 %y, <16 x i16> %z) {
 ; CHECK-LABEL: vwmaccu_vx_v16i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT: vle8.v v10, (a0)
 ; CHECK-NEXT: vwmaccu.vx v8, a1, v10
 ; CHECK-NEXT: ret
@@ -382,7 +382,7 @@
 define <8 x i32> @vwmaccu_vx_v8i32(<8 x i16>* %x, i16 %y, <8 x i32> %z) {
 ; CHECK-LABEL: vwmaccu_vx_v8i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT: vle16.v v10, (a0)
 ; CHECK-NEXT: vwmaccu.vx v8, a1, v10
 ; CHECK-NEXT: ret
@@ -399,7 +399,7 @@
 define <4 x i64> @vwmaccu_vx_v4i64(<4 x i32>* %x, i32 %y, <4 x i64> %z) {
 ; CHECK-LABEL: vwmaccu_vx_v4i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT: vle32.v v10, (a0)
 ; CHECK-NEXT: vwmaccu.vx v8, a1, v10
 ; CHECK-NEXT: ret
@@ -417,7 +417,7 @@
 ; CHECK-LABEL: vwmaccu_vx_v32i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a2, 32
-; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
 ; CHECK-NEXT: vle8.v v12, (a0)
 ; CHECK-NEXT: vwmaccu.vx v8, a1, v12
 ; CHECK-NEXT: ret
@@ -434,7 +434,7 @@
 define <16 x i32> @vwmaccu_vx_v16i32(<16 x i16>* %x, i16 %y, <16 x i32> %z) {
 ; CHECK-LABEL: vwmaccu_vx_v16i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
 ; CHECK-NEXT: vle16.v v12, (a0)
 ; CHECK-NEXT: vwmaccu.vx v8, a1, v12
 ; CHECK-NEXT: ret
@@ -451,7 +451,7 @@
 define <8 x i64> @vwmaccu_vx_v8i64(<8 x i32>* %x, i32 %y, <8 x i64> %z) {
 ; CHECK-LABEL: vwmaccu_vx_v8i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT: vle32.v v12, (a0)
 ; CHECK-NEXT: vwmaccu.vx v8, a1, v12
 ; CHECK-NEXT: ret
@@ -469,7 +469,7 @@
 ; CHECK-LABEL: vwmaccu_vx_v64i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a2, 64
-; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma
 ; CHECK-NEXT: vle8.v v16, (a0)
 ; CHECK-NEXT: vwmaccu.vx v8, a1, v16
 ; CHECK-NEXT: ret
@@ -487,7 +487,7 @@
 ; CHECK-LABEL: vwmaccu_vx_v32i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a2, 32
-; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
 ; CHECK-NEXT: vle16.v v16, (a0)
 ; CHECK-NEXT: vwmaccu.vx v8, a1, v16
 ; CHECK-NEXT: ret
@@ -504,7 +504,7 @@
 define <16 x i64> @vwmaccu_vx_v16i64(<16 x i32>* %x, i32 %y, <16 x i64> %z) {
 ; CHECK-LABEL: vwmaccu_vx_v16i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT: vle32.v v16, (a0)
 ; CHECK-NEXT: vwmaccu.vx v8, a1, v16
 ; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmaccus.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmaccus.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmaccus.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmaccus.ll
@@ -5,7 +5,7 @@
 define <2 x i16> @vwmaccus_vx_v2i16(<2 x i8>* %x, i8 %y, <2 x i16> %z) {
 ; CHECK-LABEL: vwmaccus_vx_v2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: vwmaccus.vx v8, a1, v9
 ; CHECK-NEXT: ret
@@ -22,7 +22,7 @@
 define <4 x i16> @vwmaccus_vx_v4i16(<4 x i8>* %x, i8 %y, <4 x i16> %z) {
 ; CHECK-LABEL: vwmaccus_vx_v4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: vwmaccus.vx v8, a1, v9
 ; CHECK-NEXT: ret
@@ -39,7 +39,7 @@
 define <2 x i32> @vwmaccus_vx_v2i32(<2 x i16>* %x, i16 %y, <2 x i32> %z) {
 ; CHECK-LABEL: vwmaccus_vx_v2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
 ; CHECK-NEXT: vle16.v v9, (a0)
 ; CHECK-NEXT: vwmaccus.vx v8, a1, v9
 ; CHECK-NEXT: ret
@@ -56,7 +56,7 @@
 define <8 x i16> @vwmaccus_vx_v8i16(<8 x i8>* %x, i8 %y, <8 x i16> %z) {
 ; CHECK-LABEL: vwmaccus_vx_v8i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: vwmaccus.vx v8, a1, v9
 ; CHECK-NEXT: ret
@@ -73,7 +73,7 @@
 define <4 x i32> @vwmaccus_vx_v4i32(<4 x i16>* %x, i16 %y, <4 x i32> %z) {
 ; CHECK-LABEL: vwmaccus_vx_v4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT: vle16.v v9, (a0)
 ; CHECK-NEXT: vwmaccus.vx v8, a1, v9
 ; CHECK-NEXT: ret
@@ -90,7 +90,7 @@
 define <2 x i64> @vwmaccus_vx_v2i64(<2 x i32>* %x, i32 %y, <2 x i64> %z) {
 ; CHECK-LABEL: vwmaccus_vx_v2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT: vle32.v v9, (a0)
 ; CHECK-NEXT: vwmaccus.vx v8, a1, v9
 ; CHECK-NEXT: ret
@@ -107,7 +107,7 @@
 define <16 x i16> @vwmaccus_vx_v16i16(<16 x i8>* %x, i8 %y, <16 x i16> %z) {
 ; CHECK-LABEL: vwmaccus_vx_v16i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT: vle8.v v10, (a0)
 ; CHECK-NEXT: vwmaccus.vx v8, a1, v10
 ; CHECK-NEXT: ret
@@ -124,7 +124,7 @@
 define <8 x i32> @vwmaccus_vx_v8i32(<8 x i16>* %x, i16 %y, <8 x i32> %z) {
 ; CHECK-LABEL: vwmaccus_vx_v8i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT: vle16.v v10, (a0)
 ; CHECK-NEXT: vwmaccus.vx v8, a1, v10
 ; CHECK-NEXT: ret
@@ -141,7 +141,7 @@
 define <4 x i64> @vwmaccus_vx_v4i64(<4 x i32>* %x, i32 %y, <4 x i64> %z) {
 ; CHECK-LABEL: vwmaccus_vx_v4i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT: vle32.v v10, (a0)
 ; CHECK-NEXT: vwmaccus.vx v8, a1, v10
 ; CHECK-NEXT: ret
@@ -159,7 +159,7 @@
 ; CHECK-LABEL: vwmaccus_vx_v32i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a2, 32
-; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
 ; CHECK-NEXT: vle8.v v12, (a0)
 ; CHECK-NEXT: vwmaccus.vx v8, a1, v12
 ; CHECK-NEXT: ret
@@ -176,7 +176,7 @@
 define <16 x i32> @vwmaccus_vx_v16i32(<16 x i16>* %x, i16 %y, <16 x i32> %z) {
 ; CHECK-LABEL: vwmaccus_vx_v16i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
 ; CHECK-NEXT: vle16.v v12, (a0)
 ; CHECK-NEXT: vwmaccus.vx v8, a1, v12
 ; CHECK-NEXT: ret
@@ -193,7 +193,7 @@
 define <8 x i64> @vwmaccus_vx_v8i64(<8 x i32>* %x, i32 %y, <8 x i64> %z) {
 ; CHECK-LABEL: vwmaccus_vx_v8i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT: vle32.v v12, (a0)
 ; CHECK-NEXT: vwmaccus.vx v8, a1, v12
 ; CHECK-NEXT: ret
@@ -211,7 +211,7 @@
 ; CHECK-LABEL: vwmaccus_vx_v64i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a2, 64
-; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma
 ; CHECK-NEXT: vle8.v v16, (a0)
 ; CHECK-NEXT: vwmaccus.vx v8, a1, v16
 ; CHECK-NEXT: ret
@@ -229,7 +229,7 @@
 ; CHECK-LABEL: vwmaccus_vx_v32i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a2, 32
-; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
 ; CHECK-NEXT: vle16.v v16, (a0)
 ; CHECK-NEXT: vwmaccus.vx v8, a1, v16
 ; CHECK-NEXT: ret
@@ -246,7 +246,7 @@
 define <16 x i64> @vwmaccus_vx_v16i64(<16 x i32>* %x, i32 %y, <16 x i64> %z) {
 ; CHECK-LABEL: vwmaccus_vx_v16i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT: vle32.v v16, (a0)
 ; CHECK-NEXT: vwmaccus.vx v8, a1, v16
 ; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
@@ -5,7 +5,7 @@
 define <2 x i16> @vwmul_v2i16(<2 x i8>* %x, <2 x i8>* %y) {
 ; CHECK-LABEL: vwmul_v2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: vle8.v v10, (a1)
 ; CHECK-NEXT: vwmul.vv v8, v9, v10
@@ -21,13 +21,13 @@
 define <2 x i16> @vwmul_v2i16_multiple_users(<2 x i8>* %x, <2 x i8>* %y, <2 x i8> *%z) {
 ; CHECK-LABEL: vwmul_v2i16_multiple_users:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT: vle8.v v8, (a0)
 ; CHECK-NEXT: vle8.v v9, (a1)
 ; CHECK-NEXT: vle8.v v10, (a2)
 ; CHECK-NEXT: vwmul.vv v11, v8, v9
 ; CHECK-NEXT: vwmul.vv v9, v8, v10
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
 ; CHECK-NEXT: vor.vv v8, v11, v9
 ; CHECK-NEXT: ret
 %a = load <2 x i8>, <2 x i8>* %x
@@ -45,7 +45,7 @@
 define <4 x i16> @vwmul_v4i16(<4 x i8>* %x, <4 x i8>* %y) {
 ; CHECK-LABEL: vwmul_v4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: vle8.v v10, (a1)
 ; CHECK-NEXT: vwmul.vv v8, v9, v10
@@ -61,7 +61,7 @@
 define <2 x i32> @vwmul_v2i32(<2 x i16>* %x, <2 x i16>* %y) {
 ; CHECK-LABEL: vwmul_v2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
 ; CHECK-NEXT: vle16.v v9, (a0)
 ; CHECK-NEXT: vle16.v v10, (a1)
 ; CHECK-NEXT: vwmul.vv v8, v9, v10
@@ -77,7 +77,7 @@
 define <8 x i16> @vwmul_v8i16(<8 x i8>* %x, <8 x i8>* %y) {
 ; CHECK-LABEL: vwmul_v8i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: vle8.v v10, (a1)
 ; CHECK-NEXT: vwmul.vv v8, v9, v10
@@ -93,7 +93,7 @@
 define <4 x i32> @vwmul_v4i32(<4 x i16>* %x, <4 x i16>* %y) {
 ; CHECK-LABEL: vwmul_v4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT: vle16.v v9, (a0)
 ; CHECK-NEXT: vle16.v v10, (a1)
 ; CHECK-NEXT: vwmul.vv v8, v9, v10
@@ -109,7 +109,7 @@
 define <2 x i64> @vwmul_v2i64(<2 x i32>* %x, <2 x i32>* %y) {
 ; CHECK-LABEL: vwmul_v2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT: vle32.v v9, (a0)
 ; CHECK-NEXT: vle32.v v10, (a1)
 ; CHECK-NEXT: vwmul.vv v8, v9, v10
@@ -125,7 +125,7 @@
 define <16 x i16> @vwmul_v16i16(<16 x i8>* %x, <16 x i8>* %y) {
 ; CHECK-LABEL: vwmul_v16i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT: vle8.v v10, (a0)
 ; CHECK-NEXT: vle8.v v11, (a1)
 ; CHECK-NEXT: vwmul.vv v8, v10, v11
@@ -141,7 +141,7 @@
 define <8 x i32> @vwmul_v8i32(<8 x i16>* %x, <8 x i16>* %y) {
 ; CHECK-LABEL: vwmul_v8i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT: vle16.v v10, (a0)
 ; CHECK-NEXT: vle16.v v11, (a1)
 ; CHECK-NEXT: vwmul.vv v8, v10, v11
@@ -157,7 +157,7 @@
 define <4 x i64> @vwmul_v4i64(<4 x i32>* %x, <4 x i32>* %y) {
 ; CHECK-LABEL: vwmul_v4i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT: vle32.v v10, (a0)
 ; CHECK-NEXT: vle32.v v11, (a1)
 ; CHECK-NEXT: vwmul.vv v8, v10, v11
@@ -174,7 +174,7 @@
 ; CHECK-LABEL: vwmul_v32i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a2, 32
-; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
 ; CHECK-NEXT: vle8.v v12, (a0)
 ; CHECK-NEXT: vle8.v v14, (a1)
 ; CHECK-NEXT: vwmul.vv v8, v12, v14
@@ -190,7 +190,7 @@
 define <16 x i32> @vwmul_v16i32(<16 x i16>* %x, <16 x i16>* %y) {
 ; CHECK-LABEL: vwmul_v16i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
 ; CHECK-NEXT: vle16.v v12, (a0)
 ; CHECK-NEXT: vle16.v v14, (a1)
 ; CHECK-NEXT: vwmul.vv v8, v12, v14
@@ -206,7 +206,7 @@
 define <8 x i64> @vwmul_v8i64(<8 x i32>* %x, <8 x i32>* %y) {
 ; CHECK-LABEL: vwmul_v8i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT: vle32.v v12, (a0)
 ; CHECK-NEXT: vle32.v v14, (a1)
 ; CHECK-NEXT: vwmul.vv v8, v12, v14
@@ -223,7 +223,7 @@
 ; CHECK-LABEL: vwmul_v64i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a2, 64
-; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma
 ; CHECK-NEXT: vle8.v v16, (a0)
 ; CHECK-NEXT: vle8.v v20, (a1)
 ; CHECK-NEXT: vwmul.vv v8, v16, v20
@@ -240,7 +240,7 @@
 ; CHECK-LABEL: vwmul_v32i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a2, 32
-; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
 ; CHECK-NEXT: vle16.v v16, (a0)
 ; CHECK-NEXT: vle16.v v20, (a1)
 ; CHECK-NEXT: vwmul.vv v8, v16, v20
@@ -256,7 +256,7 @@
 define <16 x i64> @vwmul_v16i64(<16 x i32>* %x, <16 x i32>* %y) {
 ; CHECK-LABEL: vwmul_v16i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT: vle32.v v16, (a0)
 ; CHECK-NEXT: vle32.v v20, (a1)
 ; CHECK-NEXT: vwmul.vv v8, v16, v20
@@ -278,16 +278,16 @@
 ; CHECK-NEXT: slli a2, a2, 3
 ; CHECK-NEXT: sub sp, sp, a2
 ; CHECK-NEXT: li a2, 128
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
 ; CHECK-NEXT: vle8.v v16, (a0)
 ; CHECK-NEXT: vle8.v v24, (a1)
 ; CHECK-NEXT: li a0, 64
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT: vslidedown.vx v8, v16, a0
 ; CHECK-NEXT: addi a1, sp, 16
 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
 ; CHECK-NEXT: vslidedown.vx v0, v24, a0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT: vwmul.vv v8, v16, v24
 ; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
@@ -314,16 +314,16 @@
 ; CHECK-NEXT: slli a2, a2, 3
 ; CHECK-NEXT: sub sp, sp, a2
 ; CHECK-NEXT: li a2, 64
-; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, ma
 ; CHECK-NEXT: vle16.v v16, (a0)
 ; CHECK-NEXT: vle16.v v24, (a1)
 ; CHECK-NEXT: li a0, 32
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT: vslidedown.vx v8, v16, a0
 ; CHECK-NEXT: addi a1, sp, 16
 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
 ; CHECK-NEXT: vslidedown.vx v0, v24, a0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT: vwmul.vv v8, v16, v24
 ; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
@@ -350,15 +350,15 @@
 ; CHECK-NEXT: slli a2, a2, 3
 ; CHECK-NEXT: sub sp, sp, a2
 ; CHECK-NEXT: li a2, 32
-; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
 ; CHECK-NEXT: vle32.v v16, (a0)
 ; CHECK-NEXT: vle32.v v24, (a1)
-; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
 ; CHECK-NEXT: vslidedown.vi v8, v16, 16
 ; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-NEXT: vslidedown.vi v0, v24, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT: vwmul.vv v8, v16, v24
 ; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
@@ -379,7 +379,7 @@
 define <2 x i32> @vwmul_v2i32_v2i8(<2 x i8>* %x, <2 x i8>* %y) {
 ; CHECK-LABEL: vwmul_v2i32_v2i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
 ; CHECK-NEXT: vle8.v v8, (a1)
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: vsext.vf2 v10, v8
@@ -397,7 +397,7 @@
 define <4 x i32> @vwmul_v4i32_v4i8_v4i16(<4 x i8>* %x, <4 x i16>* %y) {
 ; CHECK-LABEL: vwmul_v4i32_v4i8_v4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT: vle8.v v8, (a0)
 ; CHECK-NEXT: vle16.v v9, (a1)
 ; CHECK-NEXT: vsext.vf2 v10, v8
@@ -414,7 +414,7 @@
 define <4 x i64> @vwmul_v4i64_v4i32_v4i8(<4 x i32>* %x, <4 x i8>* %y) {
 ; CHECK-LABEL: vwmul_v4i64_v4i32_v4i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT: vle8.v v8, (a1)
 ; CHECK-NEXT: vle32.v v10, (a0)
 ; CHECK-NEXT: vsext.vf4 v11, v8
@@ -431,7 +431,7 @@
 define <2 x i16> @vwmul_vx_v2i16(<2 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: vwmul_vx_v2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: vwmul.vx v8, v9, a1
 ; CHECK-NEXT: ret
@@ -447,7 +447,7 @@
 define <4 x i16> @vwmul_vx_v4i16(<4 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: vwmul_vx_v4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: vwmul.vx v8, v9, a1
 ; CHECK-NEXT: ret
@@ -463,7 +463,7 @@
 define <2 x i32> @vwmul_vx_v2i32(<2 x i16>* %x, i16 %y) {
 ; CHECK-LABEL: vwmul_vx_v2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
 ; CHECK-NEXT: vle16.v v9, (a0)
 ; CHECK-NEXT: vwmul.vx v8, v9, a1
 ; CHECK-NEXT: ret
@@ -479,7 +479,7 @@
 define <8 x i16> @vwmul_vx_v8i16(<8 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: vwmul_vx_v8i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: vwmul.vx v8, v9, a1
 ; CHECK-NEXT: ret
@@ -495,7 +495,7 @@
 define <4 x i32> @vwmul_vx_v4i32(<4 x i16>* %x, i16 %y) {
 ; CHECK-LABEL: vwmul_vx_v4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT: vle16.v v9, (a0)
 ; CHECK-NEXT: vwmul.vx v8, v9, a1
 ; CHECK-NEXT: ret
@@ -511,7 +511,7 @@
 define <2 x i64> @vwmul_vx_v2i64(<2 x i32>* %x, i32 %y) {
 ; CHECK-LABEL: vwmul_vx_v2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT: vle32.v v9, (a0)
 ; CHECK-NEXT: vwmul.vx v8, v9, a1
 ; CHECK-NEXT: ret
@@ -527,7 +527,7 @@
 define <16 x i16> @vwmul_vx_v16i16(<16 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: vwmul_vx_v16i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT: vle8.v v10, (a0)
 ; CHECK-NEXT: vwmul.vx v8, v10, a1
 ; CHECK-NEXT: ret
@@ -543,7 +543,7 @@
 define <8 x i32> @vwmul_vx_v8i32(<8 x i16>* %x, i16 %y) {
 ; CHECK-LABEL: vwmul_vx_v8i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT: vle16.v v10, (a0)
 ; CHECK-NEXT: vwmul.vx v8, v10, a1
 ; CHECK-NEXT: ret
@@ -559,7 +559,7 @@
 define <4 x i64> @vwmul_vx_v4i64(<4 x i32>* %x, i32 %y) {
 ; CHECK-LABEL: vwmul_vx_v4i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT: vle32.v v10, (a0)
 ; CHECK-NEXT: vwmul.vx v8, v10, a1
 ; CHECK-NEXT: ret
@@ -576,7 +576,7 @@
 ; CHECK-LABEL: vwmul_vx_v32i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a2, 32
-; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
 ; CHECK-NEXT: vle8.v v12, (a0)
 ; CHECK-NEXT: vwmul.vx v8, v12, a1
 ; CHECK-NEXT: ret
@@ -592,7 +592,7 @@
 define <16 x i32> @vwmul_vx_v16i32(<16 x i16>* %x, i16 %y) {
 ; CHECK-LABEL: vwmul_vx_v16i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
 ; CHECK-NEXT: vle16.v v12, (a0)
 ; CHECK-NEXT: vwmul.vx v8, v12, a1
 ; CHECK-NEXT: ret
@@ -608,7 +608,7 @@
 define <8 x i64> @vwmul_vx_v8i64(<8 x i32>* %x, i32 %y) {
 ; CHECK-LABEL: vwmul_vx_v8i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT: vle32.v v12, (a0)
 ; CHECK-NEXT: vwmul.vx v8, v12, a1
 ; CHECK-NEXT: ret
@@ -625,7 +625,7 @@
 ; CHECK-LABEL: vwmul_vx_v64i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a2, 64
-; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma
 ; CHECK-NEXT: vle8.v v16, (a0)
 ; CHECK-NEXT: vwmul.vx v8, v16, a1
 ; CHECK-NEXT: ret
@@ -642,7 +642,7 @@
 ; CHECK-LABEL: vwmul_vx_v32i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a2, 32
-; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
 ; CHECK-NEXT: vle16.v v16, (a0)
 ; CHECK-NEXT: vwmul.vx v8, v16, a1
 ; CHECK-NEXT: ret
@@ -658,7 +658,7 @@
 define <16 x i64> @vwmul_vx_v16i64(<16 x i32>* %x, i32 %y) {
 ; CHECK-LABEL: vwmul_vx_v16i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT: vle32.v v16, (a0)
 ; CHECK-NEXT: vwmul.vx v8, v16, a1
 ; CHECK-NEXT: ret
@@ -674,7 +674,7 @@
 define <8 x i16> @vwmul_vx_v8i16_i8(<8 x i8>* %x, i8* %y) {
 ; CHECK-LABEL: vwmul_vx_v8i16_i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: lb a0, 0(a1)
 ; CHECK-NEXT: vwmul.vx v8, v9, a0
@@ -692,7 +692,7 @@
 define <8 x i16> @vwmul_vx_v8i16_i16(<8 x i8>* %x, i16* %y) {
 ; CHECK-LABEL: vwmul_vx_v8i16_i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT: vle8.v v8, (a0)
 ; CHECK-NEXT: lh a0, 0(a1)
 ; CHECK-NEXT: vsext.vf2 v9, v8
@@ -710,7 +710,7 @@
 define <4 x i32> @vwmul_vx_v4i32_i8(<4 x i16>* %x, i8* %y) {
 ; CHECK-LABEL: vwmul_vx_v4i32_i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT: vle16.v v9, (a0)
 ; CHECK-NEXT: lb a0, 0(a1)
 ; CHECK-NEXT: vwmul.vx v8, v9, a0
@@ -728,7 +728,7 @@
 define <4 x i32> @vwmul_vx_v4i32_i16(<4 x i16>* %x, i16* %y) {
 ; CHECK-LABEL: vwmul_vx_v4i32_i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT: vle16.v v9, (a0)
 ; CHECK-NEXT: lh a0, 0(a1)
 ; CHECK-NEXT: vwmul.vx v8, v9, a0
@@ -746,7 +746,7 @@
 define <4 x i32> @vwmul_vx_v4i32_i32(<4 x i16>* %x, i32* %y) {
 ; CHECK-LABEL: vwmul_vx_v4i32_i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT: vle16.v v8, (a0)
 ; CHECK-NEXT: lw a0, 0(a1)
 ; CHECK-NEXT: vsext.vf2 v9, v8
@@ -766,7 +766,7 @@
 ; RV32: # %bb.0:
 ; RV32-NEXT: addi sp, sp, -16
 ; RV32-NEXT: .cfi_def_cfa_offset 16
-; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
 ; RV32-NEXT: lb a1, 0(a1)
 ; RV32-NEXT: vle32.v v8, (a0)
 ; RV32-NEXT: srai a0, a1, 31
@@ -781,7 +781,7 @@
 ;
 ; RV64-LABEL: vwmul_vx_v2i64_i8:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
+; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
 ; RV64-NEXT: vle32.v v9, (a0)
 ; RV64-NEXT: lb a0, 0(a1)
 ; RV64-NEXT: vwmul.vx v8, v9, a0
@@ -801,7 +801,7 @@
 ; RV32: # %bb.0:
 ; RV32-NEXT: addi sp, sp, -16
 ; RV32-NEXT: .cfi_def_cfa_offset 16
-; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
 ; RV32-NEXT: lh a1, 0(a1)
 ; RV32-NEXT: vle32.v v8, (a0)
 ; RV32-NEXT: srai a0, a1, 31
@@ -816,7 +816,7 @@
 ;
 ; RV64-LABEL: vwmul_vx_v2i64_i16:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
+; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
 ; RV64-NEXT: vle32.v v9, (a0)
 ; RV64-NEXT: lh a0, 0(a1)
 ; RV64-NEXT: vwmul.vx v8, v9, a0
@@ -836,7 +836,7 @@
 ; RV32: # %bb.0:
 ; RV32-NEXT: addi sp, sp, -16
 ; RV32-NEXT: .cfi_def_cfa_offset 16
-; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
 ; RV32-NEXT: lw a1, 0(a1)
 ; RV32-NEXT: vle32.v v8, (a0)
 ; RV32-NEXT: srai a0, a1, 31
@@ -851,7 +851,7 @@
 ;
 ; RV64-LABEL: vwmul_vx_v2i64_i32:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
+; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
 ; RV64-NEXT: vle32.v v9, (a0)
 ; RV64-NEXT: lw a0, 0(a1)
 ; RV64-NEXT: vwmul.vx v8, v9, a0
@@ -871,7 +871,7 @@
 ; RV32: # %bb.0:
 ; RV32-NEXT: addi sp, sp, -16
 ; RV32-NEXT: .cfi_def_cfa_offset 16
-; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
 ; RV32-NEXT: lw a2, 4(a1)
 ; RV32-NEXT: lw a1, 0(a1)
 ; RV32-NEXT: vle32.v v8, (a0)
@@ -886,7 +886,7 @@
 ;
 ; RV64-LABEL: vwmul_vx_v2i64_i64:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
 ; RV64-NEXT: vle32.v v8, (a0)
 ; RV64-NEXT: ld a0, 0(a1)
 ; RV64-NEXT: vsext.vf2 v9, v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll
@@ -5,7 +5,7 @@
 define <2 x i16> @vwmulsu_v2i16(<2 x i8>* %x, <2 x i8>* %y) {
 ; CHECK-LABEL: vwmulsu_v2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: vle8.v v10, (a1)
 ; CHECK-NEXT: vwmulsu.vv v8, v10, v9
@@ -21,7 +21,7 @@
 define <2 x i16> @vwmulsu_v2i16_swap(<2 x i8>* %x, <2 x i8>* %y) {
 ; CHECK-LABEL: vwmulsu_v2i16_swap:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: vle8.v v10, (a1)
 ; CHECK-NEXT: vwmulsu.vv v8, v9, v10
@@ -37,7 +37,7 @@
 define <4 x i16> @vwmulsu_v4i16(<4 x i8>* %x, <4 x i8>* %y) {
 ; CHECK-LABEL: vwmulsu_v4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: vle8.v v10, (a1)
 ; CHECK-NEXT: vwmulsu.vv v8, v10, v9
@@ -53,7 +53,7 @@
 define <2 x i32> @vwmulsu_v2i32(<2 x i16>* %x, <2 x i16>* %y) {
 ; CHECK-LABEL: vwmulsu_v2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
 ; CHECK-NEXT: vle16.v v9, (a0)
 ; CHECK-NEXT: vle16.v v10, (a1)
 ; CHECK-NEXT: vwmulsu.vv v8, v10, v9
@@ -69,7 +69,7 @@
 define <8 x i16> @vwmulsu_v8i16(<8 x i8>* %x, <8 x i8>* %y) {
 ; CHECK-LABEL: vwmulsu_v8i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: vle8.v v10, (a1)
 ; CHECK-NEXT: vwmulsu.vv v8, v10, v9
@@ -85,7 +85,7 @@
 define <4 x i32> @vwmulsu_v4i32(<4 x i16>* %x, <4 x i16>* %y) {
 ; CHECK-LABEL: vwmulsu_v4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT: vle16.v v9, (a0)
 ; CHECK-NEXT: vle16.v v10, (a1)
 ; CHECK-NEXT: vwmulsu.vv v8, v10, v9
@@ -101,7 +101,7 @@
 define <2 x i64> @vwmulsu_v2i64(<2 x i32>* %x, <2 x i32>* %y) {
 ; CHECK-LABEL: vwmulsu_v2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT: vle32.v v9, (a0)
 ; CHECK-NEXT: vle32.v v10, (a1)
 ; CHECK-NEXT: vwmulsu.vv v8, v10, v9
@@ -117,7 +117,7 @@
 define <16 x i16> @vwmulsu_v16i16(<16 x i8>* %x, <16 x i8>* %y) {
 ; CHECK-LABEL: vwmulsu_v16i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT: vle8.v v10, (a0)
 ; CHECK-NEXT: vle8.v v11, (a1)
 ; CHECK-NEXT: vwmulsu.vv v8, v11, v10
@@ -133,7 +133,7 @@
 define <8 x i32> @vwmulsu_v8i32(<8 x i16>* %x, <8 x i16>* %y) {
 ; CHECK-LABEL: vwmulsu_v8i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT: vle16.v v10, (a0)
 ; CHECK-NEXT: vle16.v v11, (a1)
 ; CHECK-NEXT: vwmulsu.vv v8, v11, v10
@@ -149,7 +149,7 @@
 define <4 x i64> @vwmulsu_v4i64(<4 x i32>* %x, <4 x i32>* %y) {
 ; CHECK-LABEL: vwmulsu_v4i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT: vle32.v v10, (a0)
 ; CHECK-NEXT: vle32.v v11, (a1)
 ; CHECK-NEXT: vwmulsu.vv v8, v11, v10
@@ -166,7 +166,7 @@
 ; CHECK-LABEL: vwmulsu_v32i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a2, 32
-; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
 ; CHECK-NEXT: vle8.v v12, (a0)
 ; CHECK-NEXT: vle8.v v14, (a1)
 ; CHECK-NEXT: vwmulsu.vv v8, v14, v12
@@ -182,7 +182,7 @@
 define <16 x i32> @vwmulsu_v16i32(<16 x i16>* %x, <16 x i16>* %y) {
 ; CHECK-LABEL: vwmulsu_v16i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
 ; CHECK-NEXT: vle16.v v12, (a0)
 ; CHECK-NEXT: vle16.v v14, (a1)
 ; CHECK-NEXT: vwmulsu.vv v8, v14, v12
@@ -198,7 +198,7 @@
 define <8 x i64> @vwmulsu_v8i64(<8 x i32>* %x, <8 x i32>* %y) {
 ; CHECK-LABEL: vwmulsu_v8i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT: vle32.v v12, (a0)
 ; CHECK-NEXT: vle32.v v14, (a1)
 ; CHECK-NEXT: vwmulsu.vv v8, v14, v12
@@ -215,7 +215,7 @@
 ; CHECK-LABEL: vwmulsu_v64i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a2, 64
-; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma
 ; CHECK-NEXT: vle8.v v16, (a0)
 ; CHECK-NEXT: vle8.v v20, (a1)
 ; CHECK-NEXT: vwmulsu.vv v8, v20, v16
@@ -232,7 +232,7 @@
 ; CHECK-LABEL: vwmulsu_v32i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a2, 32
-; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
 ; CHECK-NEXT: vle16.v v16, (a0)
 ; CHECK-NEXT: vle16.v v20, (a1)
 ; CHECK-NEXT: vwmulsu.vv v8, v20, v16
@@ -248,7 +248,7 @@
 define <16 x i64> @vwmulsu_v16i64(<16 x i32>* %x, <16 x i32>* %y) {
 ; CHECK-LABEL: vwmulsu_v16i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT: vle32.v v16, (a0)
 ; CHECK-NEXT: vle32.v v20, (a1)
 ; CHECK-NEXT: vwmulsu.vv v8, v20, v16
@@ -270,16 +270,16 @@
 ; CHECK-NEXT: slli a2, a2, 3
 ; CHECK-NEXT: sub sp, sp, a2
 ; CHECK-NEXT: li a2, 128
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
 ; CHECK-NEXT: vle8.v v16, (a0)
 ; CHECK-NEXT: vle8.v v24, (a1)
 ; CHECK-NEXT: li a0, 64
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT: vslidedown.vx v8, v16, a0
 ; CHECK-NEXT: addi a1, sp, 16
 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
 ; CHECK-NEXT: vslidedown.vx v0, v24, a0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT: vwmulsu.vv v8, v24, v16
 ; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
@@ -306,16 +306,16 @@
 ; CHECK-NEXT: slli a2, a2, 3
 ; CHECK-NEXT: sub sp, sp, a2
 ; CHECK-NEXT: li a2, 64
-; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, ma
 ; CHECK-NEXT: vle16.v v16, (a0)
 ; CHECK-NEXT: vle16.v v24, (a1)
 ; CHECK-NEXT: li a0, 32
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT: vslidedown.vx v8, v16, a0
 ; CHECK-NEXT: addi a1, sp, 16
 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
 ; CHECK-NEXT: vslidedown.vx v0, v24, a0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT: vwmulsu.vv v8, v24, v16
 ; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
@@ -342,15 +342,15 @@
 ; CHECK-NEXT: slli a2, a2, 3
 ; CHECK-NEXT: sub sp, sp, a2
 ; CHECK-NEXT: li a2, 32
-; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
 ; CHECK-NEXT: vle32.v v16, (a0)
 ; CHECK-NEXT: vle32.v v24, (a1)
-; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
 ; CHECK-NEXT: vslidedown.vi v8, v16, 16
 ; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-NEXT: vslidedown.vi v0, v24, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT: vwmulsu.vv v8, v24, v16
 ; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
@@ -371,7 +371,7 @@
 define <2 x i32> @vwmulsu_v2i32_v2i8(<2 x i8>* %x, <2 x i8>* %y) {
 ; CHECK-LABEL: vwmulsu_v2i32_v2i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
 ; CHECK-NEXT: vle8.v v8, (a0)
 ; CHECK-NEXT: vle8.v v9, (a1)
 ; CHECK-NEXT: vzext.vf2 v10, v8
@@ -389,7 +389,7 @@
 define <4 x i32> @vwmulsu_v4i32_v4i8_v4i16(<4 x i8>* %x, <4 x i16>* %y) {
 ; CHECK-LABEL: vwmulsu_v4i32_v4i8_v4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT: vle8.v v8, (a0)
 ; CHECK-NEXT: vle16.v v9, (a1)
 ; CHECK-NEXT: vzext.vf2 v10, v8
@@ -406,7 +406,7 @@
 define <4 x i64> @vwmulsu_v4i64_v4i32_v4i8(<4 x i32>* %x, <4 x i8>* %y) {
 ; CHECK-LABEL: vwmulsu_v4i64_v4i32_v4i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT: vle8.v v8, (a1)
 ; CHECK-NEXT: vle32.v v10, (a0)
 ; CHECK-NEXT: vsext.vf4 v11, v8
@@ -423,7 +423,7 @@
 define <2 x i16> @vwmulsu_vx_v2i16(<2 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: vwmulsu_vx_v2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: vwmulsu.vx v8, v9, a1
 ; CHECK-NEXT: ret
@@ -439,7 +439,7 @@
 define <2 x i16> @vwmulsu_vx_v2i16_swap(<2 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: vwmulsu_vx_v2i16_swap:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: vmv.v.x v10, a1
 ; CHECK-NEXT: vwmulsu.vv v8, v10, v9
@@ -456,7 +456,7 @@
 define <4 x i16> @vwmulsu_vx_v4i16(<4 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: vwmulsu_vx_v4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: vwmulsu.vx v8, v9, a1
 ; CHECK-NEXT: ret
@@ -472,7 +472,7 @@
 define <2 x i32> @vwmulsu_vx_v2i32(<2 x i16>* %x, i16 %y) {
 ; CHECK-LABEL: vwmulsu_vx_v2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
 ; CHECK-NEXT: vle16.v v9, (a0)
 ; CHECK-NEXT: vwmulsu.vx v8, v9, a1
 ; CHECK-NEXT: ret
@@ -488,7 +488,7 @@
 define <8 x i16> @vwmulsu_vx_v8i16(<8 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: vwmulsu_vx_v8i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: vwmulsu.vx v8, v9, a1
 ; CHECK-NEXT: ret
@@ -504,7 +504,7 @@
 define <4 x i32> @vwmulsu_vx_v4i32(<4 x i16>* %x, i16 %y) {
 ; CHECK-LABEL: vwmulsu_vx_v4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT: vle16.v v9, (a0)
 ; CHECK-NEXT: vwmulsu.vx v8, v9, a1
 ; CHECK-NEXT: ret
@@ -520,7 +520,7 @@
 define <2 x i64> @vwmulsu_vx_v2i64(<2 x i32>* %x, i32 %y) {
 ; CHECK-LABEL: vwmulsu_vx_v2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT: vle32.v v9, (a0)
 ; CHECK-NEXT: vwmulsu.vx v8, v9, a1
 ; CHECK-NEXT: ret
@@ -536,7 +536,7 @@
 define <16 x i16> @vwmulsu_vx_v16i16(<16 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: vwmulsu_vx_v16i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT: vle8.v v10, (a0)
 ; CHECK-NEXT: vwmulsu.vx v8, v10, a1
 ; CHECK-NEXT: ret
@@ -552,7 +552,7 @@
 define <8 x i32> @vwmulsu_vx_v8i32(<8 x i16>* %x, i16 %y) {
 ; CHECK-LABEL: vwmulsu_vx_v8i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT: vle16.v v10, (a0)
 ; CHECK-NEXT: vwmulsu.vx v8, v10, a1
 ; CHECK-NEXT: ret
@@ -568,7 +568,7 @@
 define <4 x i64> @vwmulsu_vx_v4i64(<4 x i32>* %x, i32 %y) {
 ; CHECK-LABEL: vwmulsu_vx_v4i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT: vle32.v v10, (a0)
 ; CHECK-NEXT: vwmulsu.vx v8, v10, a1
 ; CHECK-NEXT: ret
@@ -585,7 +585,7 @@
 ; CHECK-LABEL: vwmulsu_vx_v32i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a2, 32
-; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
 ; CHECK-NEXT: vle8.v v12, (a0)
 ; CHECK-NEXT: vwmulsu.vx v8, v12, a1
 ; CHECK-NEXT: ret
@@ -601,7 +601,7 @@
 define <16 x i32> @vwmulsu_vx_v16i32(<16 x i16>* %x, i16 %y) {
 ; CHECK-LABEL: vwmulsu_vx_v16i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
 ; CHECK-NEXT: vle16.v v12, (a0)
 ; CHECK-NEXT: vwmulsu.vx v8, v12, a1
 ; CHECK-NEXT: ret
@@ -617,7 +617,7 @@
 define <8 x i64> @vwmulsu_vx_v8i64(<8 x i32>* %x, i32 %y) {
 ; CHECK-LABEL: vwmulsu_vx_v8i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT: vle32.v v12, (a0)
 ; CHECK-NEXT: vwmulsu.vx v8, v12, a1
 ; CHECK-NEXT: ret
@@ -634,7 +634,7 @@
 ; CHECK-LABEL: vwmulsu_vx_v64i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a2, 64
-; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma
 ; CHECK-NEXT: vle8.v v16, (a0)
 ; CHECK-NEXT: vwmulsu.vx v8, v16, a1
 ; CHECK-NEXT: ret
@@ -651,7 +651,7 @@
 ; CHECK-LABEL: vwmulsu_vx_v32i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a2, 32
-; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
 ; CHECK-NEXT: vle16.v v16, (a0)
 ; CHECK-NEXT: vwmulsu.vx v8, v16, a1
 ; CHECK-NEXT: ret
@@ -667,7 +667,7 @@
 define <16 x i64> @vwmulsu_vx_v16i64(<16 x i32>* %x, i32 %y) {
 ; CHECK-LABEL: vwmulsu_vx_v16i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT: vle32.v v16, (a0)
 ; CHECK-NEXT: vwmulsu.vx v8, v16, a1
 ; CHECK-NEXT: ret
@@ -683,7 +683,7 @@
 define <8 x i16> @vwmulsu_vx_v8i16_i8(<8 x i8>* %x, i8* %y) {
 ; CHECK-LABEL: vwmulsu_vx_v8i16_i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: lbu a0, 0(a1)
 ; CHECK-NEXT: vwmulsu.vx v8, v9, a0
@@ -701,7 +701,7 @@
 define <8 x i16> @vwmulsu_vx_v8i16_i8_swap(<8 x i8>* %x, i8* %y) {
 ; CHECK-LABEL: vwmulsu_vx_v8i16_i8_swap:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: vlse8.v v10, (a1), zero
 ; CHECK-NEXT: vwmulsu.vv v8, v10, v9
@@ -719,7 +719,7 @@
 define <4 x i32> @vwmulsu_vx_v4i32_i8(<4 x i16>* %x, i8* %y) {
 ; CHECK-LABEL: vwmulsu_vx_v4i32_i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT: vle16.v v9, (a0)
 ; CHECK-NEXT: lbu a0, 0(a1)
 ; CHECK-NEXT: vwmul.vx v8, v9, a0
@@ -737,7 +737,7 @@
 define <4 x i32> @vwmulsu_vx_v4i32_i16(<4 x i16>* %x, i16* %y) {
 ; CHECK-LABEL: vwmulsu_vx_v4i32_i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT: vle16.v v9, (a0)
 ; CHECK-NEXT: lhu a0, 0(a1)
 ; CHECK-NEXT: vwmulsu.vx v8, v9, a0
@@ -757,7 +757,7 @@
 ; RV32: # %bb.0:
 ; RV32-NEXT: addi sp, sp, -16
 ; RV32-NEXT: .cfi_def_cfa_offset 16
-; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
 ; RV32-NEXT: lbu a1, 0(a1)
 ; RV32-NEXT: vle32.v v8, (a0)
 ; RV32-NEXT: sw zero, 12(sp)
@@ -771,7 +771,7 @@
 ;
 ; RV64-LABEL: vwmulsu_vx_v2i64_i8:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
+; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
 ; RV64-NEXT: vle32.v v9, (a0)
 ; RV64-NEXT: lbu a0, 0(a1)
 ; RV64-NEXT: vwmul.vx v8, v9, a0
@@ -791,7 +791,7 @@
 ; RV32: # %bb.0:
 ; RV32-NEXT: addi sp, sp, -16
 ; RV32-NEXT: .cfi_def_cfa_offset 16
-; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
 ; RV32-NEXT: lhu a1, 0(a1)
 ; RV32-NEXT: vle32.v v8, (a0)
 ; RV32-NEXT: sw zero, 12(sp)
@@ -805,7 +805,7 @@
 ;
 ; RV64-LABEL: vwmulsu_vx_v2i64_i16:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
+; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
 ; RV64-NEXT: vle32.v v9, (a0)
 ; RV64-NEXT: lhu a0, 0(a1)
 ; RV64-NEXT: vwmul.vx v8, v9, a0
@@ -825,7 +825,7 @@
 ; RV32: # %bb.0:
 ; RV32-NEXT: addi sp, sp, -16
 ; RV32-NEXT: .cfi_def_cfa_offset 16
-; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
 ; RV32-NEXT: lw a1, 0(a1)
 ; RV32-NEXT: vle32.v v8, (a0)
 ; RV32-NEXT: sw zero, 12(sp)
@@ -839,7 +839,7 @@
 ;
 ; RV64-LABEL: vwmulsu_vx_v2i64_i32:
 ; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
+; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
 ; RV64-NEXT: vle32.v v9, (a0)
 ; RV64-NEXT: lwu a0, 0(a1)
 ; RV64-NEXT: vwmulsu.vx v8, v9, a0
@@ -857,7 +857,7 @@
 define <8 x i16> @vwmulsu_vx_v8i16_i8_and(<8 x i8>* %x, i16 %y) {
 ; CHECK-LABEL: vwmulsu_vx_v8i16_i8_and:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: vwmulsu.vx v8, v9, a1
 ; CHECK-NEXT: ret
@@ -873,7 +873,7 @@
 define <8 x i16> @vwmulsu_vx_v8i16_i8_and1(<8 x i8>* %x, i16 %y) {
 ; CHECK-LABEL: vwmulsu_vx_v8i16_i8_and1:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: andi a0, a1, 254
 ; CHECK-NEXT: vwmulsu.vx v8, v9, a0
@@ -890,7 +890,7 @@
 define <4 x i32> @vwmulsu_vx_v4i32_i16_and(<4 x i16>* %x, i32 %y) {
 ; CHECK-LABEL: vwmulsu_vx_v4i32_i16_and:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT: vle16.v v9, (a0)
 ; CHECK-NEXT: vwmulsu.vx v8, v9, a1
 ; CHECK-NEXT: ret
@@ -906,7 +906,7 @@
 define <4 x i32> @vwmulsu_vx_v4i32_i16_zext(<4 x i16>* %x, i16 %y) {
 ; CHECK-LABEL: vwmulsu_vx_v4i32_i16_zext:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT: vle16.v v9, (a0)
 ; CHECK-NEXT: vwmulsu.vx v8, v9, a1
 ; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
@@ -5,7 +5,7 @@
 define <2 x i16> @vwmulu_v2i16(<2 x i8>* %x, <2 x i8>* %y) {
 ; CHECK-LABEL: vwmulu_v2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: vle8.v v10, (a1)
 ; CHECK-NEXT: vwmulu.vv v8, v9, v10
@@ -21,7 +21,7 @@
 define <4 x i16> @vwmulu_v4i16(<4 x i8>* %x, <4 x i8>* %y) {
 ; CHECK-LABEL: vwmulu_v4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: vle8.v v10, (a1)
 ; CHECK-NEXT: vwmulu.vv v8, v9, v10
@@ -37,7 +37,7 @@
 define <2 x i32> @vwmulu_v2i32(<2 x i16>* %x, <2 x i16>* %y) {
 ; CHECK-LABEL: vwmulu_v2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
 ; CHECK-NEXT: vle16.v v9, (a0)
 ; CHECK-NEXT: vle16.v v10, (a1)
 ; CHECK-NEXT: vwmulu.vv v8, v9, v10
@@ -53,7 +53,7 @@
 define <8 x i16> @vwmulu_v8i16(<8 x i8>* %x, <8 x i8>* %y) {
 ; CHECK-LABEL: vwmulu_v8i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: vle8.v v10, (a1)
 ; CHECK-NEXT: vwmulu.vv v8, v9, v10
@@ -69,7 +69,7 @@
 define <4 x i32> @vwmulu_v4i32(<4 x i16>* %x, <4 x i16>* %y) {
 ; CHECK-LABEL: vwmulu_v4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT: vle16.v v9, (a0)
 ; CHECK-NEXT: vle16.v v10, (a1)
 ; CHECK-NEXT: vwmulu.vv v8, v9, v10
@@ -85,7 +85,7 @@
 define <2 x i64> @vwmulu_v2i64(<2 x i32>* %x, <2 x i32>* %y) {
 ; CHECK-LABEL: vwmulu_v2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT: vle32.v v9, (a0)
 ; CHECK-NEXT: vle32.v v10, (a1)
 ; CHECK-NEXT: vwmulu.vv v8, v9, v10
@@ -101,7 +101,7 @@
 define <16 x i16> @vwmulu_v16i16(<16 x i8>* %x, <16 x i8>* %y) {
 ; CHECK-LABEL: vwmulu_v16i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT: vle8.v v10, (a0)
 ; CHECK-NEXT: vle8.v v11, (a1)
 ; CHECK-NEXT: vwmulu.vv v8, v10, v11
@@ -117,7 +117,7 @@
 define <8 x i32> @vwmulu_v8i32(<8 x i16>* %x, <8 x i16>* %y) {
 ; CHECK-LABEL: vwmulu_v8i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT: vle16.v v10, (a0)
 ; CHECK-NEXT: vle16.v v11, (a1)
 ; CHECK-NEXT: vwmulu.vv v8, v10, v11
@@ -133,7 +133,7 @@
 define <4 x i64> @vwmulu_v4i64(<4 x i32>* %x, <4 x i32>* %y) {
 ; CHECK-LABEL: vwmulu_v4i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT: vle32.v v10, (a0)
 ; CHECK-NEXT: vle32.v v11, (a1)
 ; CHECK-NEXT: vwmulu.vv v8, v10, v11
@@ -150,7 +150,7 @@
 ; CHECK-LABEL: vwmulu_v32i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a2, 32
-; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
 ; CHECK-NEXT: vle8.v v12, (a0)
 ; CHECK-NEXT: vle8.v v14, (a1)
 ; CHECK-NEXT: vwmulu.vv v8, v12, v14
@@ -166,7 +166,7 @@
 define <16 x i32> @vwmulu_v16i32(<16 x i16>* %x, <16 x i16>* %y) {
 ; CHECK-LABEL: vwmulu_v16i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
 ; CHECK-NEXT: vle16.v v12, (a0)
 ; CHECK-NEXT: vle16.v v14, (a1)
 ; CHECK-NEXT: vwmulu.vv v8, v12, v14
@@ -182,7 +182,7 @@
 define <8 x i64> @vwmulu_v8i64(<8 x i32>* %x, <8 x i32>* %y) {
 ; CHECK-LABEL: vwmulu_v8i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT: vle32.v v12, (a0)
 ; CHECK-NEXT: vle32.v v14, (a1)
 ; CHECK-NEXT: vwmulu.vv v8, v12, v14
@@ -199,7 +199,7 @@
 ; CHECK-LABEL: vwmulu_v64i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a2, 64
-; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma
 ; CHECK-NEXT: vle8.v v16, (a0)
 ; CHECK-NEXT: vle8.v v20, (a1)
 ; CHECK-NEXT: vwmulu.vv v8, v16, v20
@@ -216,7 +216,7 @@
 ; CHECK-LABEL: vwmulu_v32i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a2, 32
-; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
 ; CHECK-NEXT: vle16.v v16, (a0)
 ; CHECK-NEXT: vle16.v v20, (a1)
 ; CHECK-NEXT: vwmulu.vv v8, v16, v20
@@ -232,7 +232,7 @@
 define <16 x i64> @vwmulu_v16i64(<16 x i32>* %x, <16 x i32>* %y) {
 ; CHECK-LABEL: vwmulu_v16i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT: vle32.v v16, (a0)
 ; CHECK-NEXT: vle32.v v20, (a1)
 ; CHECK-NEXT: vwmulu.vv v8, v16, v20
@@ -254,16 +254,16 @@
 ; CHECK-NEXT: slli a2, a2, 3
 ; CHECK-NEXT: sub sp, sp, a2
 ; CHECK-NEXT: li a2, 128
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
 ; CHECK-NEXT: vle8.v v16, (a0)
 ; CHECK-NEXT: vle8.v v24, (a1)
 ; CHECK-NEXT: li a0, 64
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT: vslidedown.vx v8, v16, a0
 ; CHECK-NEXT: addi a1, sp, 16
 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
 ; CHECK-NEXT: vslidedown.vx v0, v24, a0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT: vwmulu.vv v8, v16, v24
 ; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
@@ -290,16 +290,16 @@
 ; CHECK-NEXT: slli a2, a2, 3
 ; CHECK-NEXT: sub sp, sp, a2
 ; CHECK-NEXT: li a2, 64
-; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, ma
 ; CHECK-NEXT: vle16.v v16, (a0)
 ; CHECK-NEXT: vle16.v v24, (a1)
 ; CHECK-NEXT: li a0, 32
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT: vslidedown.vx v8, v16, a0
 ; CHECK-NEXT: addi a1, sp, 16
 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
 ; CHECK-NEXT: vslidedown.vx v0, v24, a0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT: vwmulu.vv v8, v16, v24
 ; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
@@ -326,15 +326,15 @@
 ; CHECK-NEXT: slli a2, a2, 3
 ; CHECK-NEXT: sub sp, sp, a2
 ; CHECK-NEXT: li a2, 32
-; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
 ; CHECK-NEXT: vle32.v v16, (a0)
 ; CHECK-NEXT: vle32.v v24, (a1)
-; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
 ; CHECK-NEXT: vslidedown.vi v8, v16, 16
 ; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
 ; CHECK-NEXT: vslidedown.vi v0, v24, 16
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT: vwmulu.vv v8, v16, v24
 ; CHECK-NEXT: addi a0, sp, 16
 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
@@ -355,7 +355,7 @@
 define <2 x i32> @vwmulu_v2i32_v2i8(<2 x i8>* %x, <2 x i8>* %y) {
 ; CHECK-LABEL: vwmulu_v2i32_v2i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
 ; CHECK-NEXT: vle8.v v8, (a1)
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: vzext.vf2 v10, v8
@@ -373,7 +373,7 @@
 define <4 x i32> @vwmulu_v4i32_v4i8_v4i16(<4 x i8>* %x, <4 x i16>* %y) {
 ; CHECK-LABEL: vwmulu_v4i32_v4i8_v4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT: vle8.v v8, (a0)
 ; CHECK-NEXT: vle16.v v9, (a1)
 ; CHECK-NEXT: vzext.vf2 v10, v8
@@ -390,7 +390,7 @@
 define <4 x i64> @vwmulu_v4i64_v4i32_v4i8(<4 x i32>* %x, <4 x i8>* %y) {
 ; CHECK-LABEL: vwmulu_v4i64_v4i32_v4i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT: vle8.v v8, (a1)
 ; CHECK-NEXT: vle32.v v10, (a0)
 ; CHECK-NEXT: vzext.vf4 v11, v8
@@ -407,7 +407,7 @@
 define <2 x i16> @vwmulu_vx_v2i16(<2 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: vwmulu_vx_v2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: vwmulu.vx v8, v9, a1
 ; CHECK-NEXT: ret
@@ -423,7 +423,7 @@
 define <4 x i16> @vwmulu_vx_v4i16(<4 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: vwmulu_vx_v4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: vwmulu.vx v8, v9, a1
 ; CHECK-NEXT: ret
@@ -439,7 +439,7 @@
 define <2 x i32> @vwmulu_vx_v2i32(<2 x i16>* %x, i16 %y) {
 ; CHECK-LABEL: vwmulu_vx_v2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
 ; CHECK-NEXT: vle16.v v9, (a0)
 ; CHECK-NEXT: vwmulu.vx v8, v9, a1
 ; CHECK-NEXT: ret
@@ -455,7 +455,7 @@
 define <8 x i16> @vwmulu_vx_v8i16(<8 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: vwmulu_vx_v8i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: vwmulu.vx v8, v9, a1
 ; CHECK-NEXT: ret
@@ -471,7 +471,7 @@
 define <4 x i32> @vwmulu_vx_v4i32(<4 x i16>* %x, i16 %y) {
 ; CHECK-LABEL: vwmulu_vx_v4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT: vle16.v v9, (a0)
 ; CHECK-NEXT: vwmulu.vx v8, v9, a1
 ; CHECK-NEXT: ret
@@ -487,7 +487,7 @@
 define <2 x i64> @vwmulu_vx_v2i64(<2 x i32>* %x, i32 %y) {
 ; CHECK-LABEL: vwmulu_vx_v2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT: vle32.v v9, (a0)
 ; CHECK-NEXT: vwmulu.vx v8, v9, a1
 ; CHECK-NEXT: ret
@@ -503,7 +503,7 @@
 define <16 x i16> @vwmulu_vx_v16i16(<16 x i8>* %x, i8 %y) {
 ; CHECK-LABEL: vwmulu_vx_v16i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT: vle8.v v10, (a0)
 ; CHECK-NEXT: vwmulu.vx v8, v10, a1
 ; CHECK-NEXT: ret
@@ -519,7 +519,7 @@
 define <8 x i32> @vwmulu_vx_v8i32(<8 x i16>* %x, i16 %y) {
 ; CHECK-LABEL: vwmulu_vx_v8i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT: vle16.v v10, (a0)
 ; CHECK-NEXT: vwmulu.vx v8, v10, a1
 ; CHECK-NEXT: ret
@@ -535,7 +535,7 @@
 define <4 x i64> @vwmulu_vx_v4i64(<4 x i32>* %x, i32 %y) {
 ; CHECK-LABEL: vwmulu_vx_v4i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT: vle32.v v10, (a0)
 ; CHECK-NEXT: vwmulu.vx v8, v10, a1
 ; CHECK-NEXT: ret
@@ -552,7 +552,7 @@
 ; CHECK-LABEL: vwmulu_vx_v32i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a2, 32
-; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
 ; CHECK-NEXT: vle8.v v12, (a0)
 ; CHECK-NEXT: vwmulu.vx v8, v12, a1
 ; CHECK-NEXT: ret
@@ -568,7 +568,7 @@
 define <16 x i32> @vwmulu_vx_v16i32(<16 x i16>* %x, i16 %y) {
 ; CHECK-LABEL: vwmulu_vx_v16i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
 ; CHECK-NEXT: vle16.v v12, (a0)
 ; CHECK-NEXT: vwmulu.vx v8, v12, a1
 ; CHECK-NEXT: ret
@@ -584,7 +584,7 @@
 define <8 x i64> @vwmulu_vx_v8i64(<8 x i32>* %x, i32 %y) {
 ; CHECK-LABEL: vwmulu_vx_v8i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT: vle32.v v12, (a0)
 ; CHECK-NEXT: vwmulu.vx v8, v12, a1
 ; CHECK-NEXT: ret
@@ -601,7 +601,7 @@
 ; CHECK-LABEL: vwmulu_vx_v64i16:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a2, 64
-; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma
 ; CHECK-NEXT: vle8.v v16, (a0)
 ; CHECK-NEXT: vwmulu.vx v8, v16, a1
 ; CHECK-NEXT: ret
@@ -618,7 +618,7 @@
 ; CHECK-LABEL: vwmulu_vx_v32i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a2, 32
-; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
 ; CHECK-NEXT: vle16.v v16, (a0)
 ; CHECK-NEXT: vwmulu.vx v8, v16, a1
 ; CHECK-NEXT: ret
@@ -634,7 +634,7 @@
 define <16 x i64> @vwmulu_vx_v16i64(<16 x i32>* %x, i32 %y) {
 ; CHECK-LABEL: vwmulu_vx_v16i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT: vle32.v v16, (a0)
 ; CHECK-NEXT: vwmulu.vx v8, v16, a1
 ; CHECK-NEXT: ret
@@ -650,7 +650,7 @@
 define <8 x i16> @vwmulu_vx_v8i16_i8(<8 x i8>* %x, i8* %y) {
 ; CHECK-LABEL: vwmulu_vx_v8i16_i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: lbu a0, 0(a1)
 ; CHECK-NEXT: vwmulu.vx v8, v9, a0
@@ -668,7 +668,7 @@
 define <8 x i16> @vwmulu_vx_v8i16_i16(<8 x i8>* %x, i16* %y) {
 ; CHECK-LABEL: vwmulu_vx_v8i16_i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT: vle8.v v8, (a0)
 ; CHECK-NEXT: lh a0, 0(a1)
 ; CHECK-NEXT: vzext.vf2 v9, v8
@@ -686,7 +686,7 @@
 define <4 x i32> @vwmulu_vx_v4i32_i8(<4 x i16>* %x, i8* %y) {
 ; CHECK-LABEL: vwmulu_vx_v4i32_i8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT: vle16.v v9, (a0)
 ; CHECK-NEXT: lbu a0, 0(a1)
 ; CHECK-NEXT: vwmulu.vx v8, v9, a0
@@ -704,7 +704,7 @@
 define <4 x i32> @vwmulu_vx_v4i32_i16(<4 x i16>* %x, i16* %y) {
 ; CHECK-LABEL: vwmulu_vx_v4i32_i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT: vle16.v v9, (a0)
 ; CHECK-NEXT: lhu a0, 0(a1)
 ; CHECK-NEXT: vwmulu.vx v8, v9, a0
@@ -722,7 +722,7 @@
 define <4 x i32> @vwmulu_vx_v4i32_i32(<4 x i16>* %x, i32* %y) {
 ; CHECK-LABEL: vwmulu_vx_v4i32_i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT: vle16.v v8, (a0)
 ; CHECK-NEXT: lw a0, 0(a1)
 ; CHECK-NEXT: vzext.vf2 v9, v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll
@@ -5,7 +5,7 @@
 define <2 x i16> @vwsub_v2i16(<2 x i8>* %x, <2 x i8>* %y) {
 ; CHECK-LABEL: vwsub_v2i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: vle8.v v10, (a1)
 ; CHECK-NEXT: vwsub.vv v8, v9, v10
@@ -21,7 +21,7 @@
 define <4 x i16> @vwsub_v4i16(<4 x i8>* %x, <4 x i8>* %y) {
 ; CHECK-LABEL: vwsub_v4i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: vle8.v v10, (a1)
 ; CHECK-NEXT: vwsub.vv v8, v9, v10
@@ -37,7 +37,7 @@
 define <2 x i32> @vwsub_v2i32(<2 x i16>* %x, <2 x i16>* %y) {
 ; CHECK-LABEL: vwsub_v2i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
 ; CHECK-NEXT: vle16.v v9, (a0)
 ; CHECK-NEXT: vle16.v v10, (a1)
 ; CHECK-NEXT: vwsub.vv v8, v9, v10
@@ -53,7 +53,7 @@
 define <8 x i16> @vwsub_v8i16(<8 x i8>* %x, <8 x i8>* %y) {
 ; CHECK-LABEL: vwsub_v8i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT: vle8.v v9, (a0)
 ; CHECK-NEXT: vle8.v v10, (a1)
 ; CHECK-NEXT: vwsub.vv v8, v9, v10
@@ -69,7 +69,7 @@
 define <4 x i32> @vwsub_v4i32(<4 x i16>* %x, <4 x i16>* %y) {
 ; CHECK-LABEL: vwsub_v4i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT: vle16.v v9, (a0)
 ; CHECK-NEXT: vle16.v v10, (a1)
 ; CHECK-NEXT: vwsub.vv v8, v9, v10
@@ -85,7 +85,7 @@
 define <2 x i64> @vwsub_v2i64(<2 x i32>* %x, <2 x i32>* %y) {
 ; CHECK-LABEL: vwsub_v2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT: vle32.v v9, (a0)
 ; CHECK-NEXT: vle32.v v10, (a1)
 ; CHECK-NEXT: vwsub.vv v8, v9, v10
@@ -101,7 +101,7 @@
 define <16 x i16> @vwsub_v16i16(<16 x i8>* %x, <16 x i8>* %y) {
 ; CHECK-LABEL: vwsub_v16i16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
 ;
CHECK-NEXT: vle8.v v10, (a0) ; CHECK-NEXT: vle8.v v11, (a1) ; CHECK-NEXT: vwsub.vv v8, v10, v11 @@ -117,7 +117,7 @@ define <8 x i32> @vwsub_v8i32(<8 x i16>* %x, <8 x i16>* %y) { ; CHECK-LABEL: vwsub_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v10, (a0) ; CHECK-NEXT: vle16.v v11, (a1) ; CHECK-NEXT: vwsub.vv v8, v10, v11 @@ -133,7 +133,7 @@ define <4 x i64> @vwsub_v4i64(<4 x i32>* %x, <4 x i32>* %y) { ; CHECK-LABEL: vwsub_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v10, (a0) ; CHECK-NEXT: vle32.v v11, (a1) ; CHECK-NEXT: vwsub.vv v8, v10, v11 @@ -150,7 +150,7 @@ ; CHECK-LABEL: vwsub_v32i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vle8.v v12, (a0) ; CHECK-NEXT: vle8.v v14, (a1) ; CHECK-NEXT: vwsub.vv v8, v12, v14 @@ -166,7 +166,7 @@ define <16 x i32> @vwsub_v16i32(<16 x i16>* %x, <16 x i16>* %y) { ; CHECK-LABEL: vwsub_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v12, (a0) ; CHECK-NEXT: vle16.v v14, (a1) ; CHECK-NEXT: vwsub.vv v8, v12, v14 @@ -182,7 +182,7 @@ define <8 x i64> @vwsub_v8i64(<8 x i32>* %x, <8 x i32>* %y) { ; CHECK-LABEL: vwsub_v8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v12, (a0) ; CHECK-NEXT: vle32.v v14, (a1) ; CHECK-NEXT: vwsub.vv v8, v12, v14 @@ -199,7 +199,7 @@ ; CHECK-LABEL: vwsub_v64i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 64 -; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vle8.v v20, (a1) ; CHECK-NEXT: vwsub.vv v8, v16, v20 @@ -216,7 +216,7 @@ ; CHECK-LABEL: vwsub_v32i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vle16.v v20, (a1) ; CHECK-NEXT: vwsub.vv v8, v16, v20 @@ -232,7 +232,7 @@ define <16 x i64> @vwsub_v16i64(<16 x i32>* %x, <16 x i32>* %y) { ; CHECK-LABEL: vwsub_v16i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vle32.v v20, (a1) ; CHECK-NEXT: vwsub.vv v8, v16, v20 @@ -253,16 +253,16 @@ ; CHECK-NEXT: slli a2, a2, 3 ; CHECK-NEXT: sub sp, sp, a2 ; CHECK-NEXT: li a2, 128 -; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vle8.v v24, (a1) ; CHECK-NEXT: li a0, 64 -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v16, a0 ; CHECK-NEXT: addi a1, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: vslidedown.vx v0, v24, a0 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwsub.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload @@ -288,16 +288,16 @@ ; CHECK-NEXT: slli a2, a2, 3 ; CHECK-NEXT: sub sp, sp, a2 ; CHECK-NEXT: li a2, 64 -; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu +; CHECK-NEXT: vsetvli 
zero, a2, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vle16.v v24, (a1) ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v16, a0 ; CHECK-NEXT: addi a1, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: vslidedown.vx v0, v24, a0 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwsub.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload @@ -323,15 +323,15 @@ ; CHECK-NEXT: slli a2, a2, 3 ; CHECK-NEXT: sub sp, sp, a2 ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vle32.v v24, (a1) -; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v16, 16 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill ; CHECK-NEXT: vslidedown.vi v0, v24, 16 -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vwsub.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload @@ -352,7 +352,7 @@ define <2 x i32> @vwsub_v2i32_v2i8(<2 x i8>* %x, <2 x i8>* %y) { ; CHECK-LABEL: vwsub_v2i32_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle8.v v8, (a1) ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vsext.vf2 v10, v8 @@ -370,7 +370,7 @@ define <4 x i32> @vwsub_v4i32_v4i8_v4i16(<4 x i8>* %x, <4 x i16>* %y) { ; CHECK-LABEL: vwsub_v4i32_v4i8_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vsext.vf2 v10, v8 @@ -387,7 +387,7 @@ define <4 x i64> @vwsub_v4i64_v4i32_v4i8(<4 x i32>* %x, <4 x i8>* %y) { ; CHECK-LABEL: vwsub_v4i64_v4i32_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a1) ; CHECK-NEXT: vle32.v v10, (a0) ; CHECK-NEXT: vsext.vf4 v11, v8 @@ -404,7 +404,7 @@ define <2 x i16> @vwsub_vx_v2i16(<2 x i8>* %x, i8 %y) { ; CHECK-LABEL: vwsub_vx_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vwsub.vx v8, v9, a1 ; CHECK-NEXT: ret @@ -420,7 +420,7 @@ define <4 x i16> @vwsub_vx_v4i16(<4 x i8>* %x, i8 %y) { ; CHECK-LABEL: vwsub_vx_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vwsub.vx v8, v9, a1 ; CHECK-NEXT: ret @@ -436,7 +436,7 @@ define <2 x i32> @vwsub_vx_v2i32(<2 x i16>* %x, i16 %y) { ; CHECK-LABEL: vwsub_vx_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vwsub.vx v8, v9, a1 ; CHECK-NEXT: ret @@ -452,7 +452,7 @@ define <8 x i16> @vwsub_vx_v8i16(<8 x i8>* %x, i8 %y) { ; CHECK-LABEL: vwsub_vx_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; 
CHECK-NEXT: vwsub.vx v8, v9, a1 ; CHECK-NEXT: ret @@ -468,7 +468,7 @@ define <4 x i32> @vwsub_vx_v4i32(<4 x i16>* %x, i16 %y) { ; CHECK-LABEL: vwsub_vx_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vwsub.vx v8, v9, a1 ; CHECK-NEXT: ret @@ -484,7 +484,7 @@ define <2 x i64> @vwsub_vx_v2i64(<2 x i32>* %x, i32 %y) { ; CHECK-LABEL: vwsub_vx_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vwsub.vx v8, v9, a1 ; CHECK-NEXT: ret @@ -500,7 +500,7 @@ define <16 x i16> @vwsub_vx_v16i16(<16 x i8>* %x, i8 %y) { ; CHECK-LABEL: vwsub_vx_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v10, (a0) ; CHECK-NEXT: vwsub.vx v8, v10, a1 ; CHECK-NEXT: ret @@ -516,7 +516,7 @@ define <8 x i32> @vwsub_vx_v8i32(<8 x i16>* %x, i16 %y) { ; CHECK-LABEL: vwsub_vx_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v10, (a0) ; CHECK-NEXT: vwsub.vx v8, v10, a1 ; CHECK-NEXT: ret @@ -532,7 +532,7 @@ define <4 x i64> @vwsub_vx_v4i64(<4 x i32>* %x, i32 %y) { ; CHECK-LABEL: vwsub_vx_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v10, (a0) ; CHECK-NEXT: vwsub.vx v8, v10, a1 ; CHECK-NEXT: ret @@ -549,7 +549,7 @@ ; CHECK-LABEL: vwsub_vx_v32i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vle8.v v12, (a0) ; CHECK-NEXT: vwsub.vx v8, v12, a1 ; CHECK-NEXT: ret @@ -565,7 +565,7 @@ define <16 x i32> @vwsub_vx_v16i32(<16 x i16>* %x, i16 %y) { ; CHECK-LABEL: vwsub_vx_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v12, (a0) ; CHECK-NEXT: vwsub.vx v8, v12, a1 ; CHECK-NEXT: ret @@ -581,7 +581,7 @@ define <8 x i64> @vwsub_vx_v8i64(<8 x i32>* %x, i32 %y) { ; CHECK-LABEL: vwsub_vx_v8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v12, (a0) ; CHECK-NEXT: vwsub.vx v8, v12, a1 ; CHECK-NEXT: ret @@ -598,7 +598,7 @@ ; CHECK-LABEL: vwsub_vx_v64i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 64 -; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vwsub.vx v8, v16, a1 ; CHECK-NEXT: ret @@ -615,7 +615,7 @@ ; CHECK-LABEL: vwsub_vx_v32i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vwsub.vx v8, v16, a1 ; CHECK-NEXT: ret @@ -631,7 +631,7 @@ define <16 x i64> @vwsub_vx_v16i64(<16 x i32>* %x, i32 %y) { ; CHECK-LABEL: vwsub_vx_v16i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vwsub.vx v8, v16, a1 ; CHECK-NEXT: ret @@ -647,7 +647,7 @@ define <8 x i16> @vwsub_vx_v8i16_i8(<8 x i8>* %x, i8* %y) { ; CHECK-LABEL: vwsub_vx_v8i16_i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; 
CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vlse8.v v10, (a1), zero ; CHECK-NEXT: vwsub.vv v8, v10, v9 @@ -665,7 +665,7 @@ define <8 x i16> @vwsub_vx_v8i16_i16(<8 x i8>* %x, i16* %y) { ; CHECK-LABEL: vwsub_vx_v8i16_i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vlse16.v v8, (a1), zero ; CHECK-NEXT: vwsub.wv v8, v8, v9 @@ -682,7 +682,7 @@ define <4 x i32> @vwsub_vx_v4i32_i8(<4 x i16>* %x, i8* %y) { ; CHECK-LABEL: vwsub_vx_v4i32_i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: lb a1, 0(a1) ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vmv.v.x v10, a1 @@ -701,7 +701,7 @@ define <4 x i32> @vwsub_vx_v4i32_i16(<4 x i16>* %x, i16* %y) { ; CHECK-LABEL: vwsub_vx_v4i32_i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vlse16.v v10, (a1), zero ; CHECK-NEXT: vwsub.vv v8, v10, v9 @@ -719,7 +719,7 @@ define <4 x i32> @vwsub_vx_v4i32_i32(<4 x i16>* %x, i32* %y) { ; CHECK-LABEL: vwsub_vx_v4i32_i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vlse32.v v8, (a1), zero ; CHECK-NEXT: vwsub.wv v8, v8, v9 @@ -737,7 +737,7 @@ ; RV32-LABEL: vwsub_vx_v2i64_i8: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32-NEXT: lb a1, 0(a1) ; RV32-NEXT: vle32.v v9, (a0) ; RV32-NEXT: srai a0, a1, 31 @@ -751,7 +751,7 @@ ; ; RV64-LABEL: vwsub_vx_v2i64_i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV64-NEXT: lb a1, 0(a1) ; RV64-NEXT: vle32.v v9, (a0) ; RV64-NEXT: vmv.v.x v10, a1 @@ -771,7 +771,7 @@ ; RV32-LABEL: vwsub_vx_v2i64_i16: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32-NEXT: lh a1, 0(a1) ; RV32-NEXT: vle32.v v9, (a0) ; RV32-NEXT: srai a0, a1, 31 @@ -785,7 +785,7 @@ ; ; RV64-LABEL: vwsub_vx_v2i64_i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV64-NEXT: lh a1, 0(a1) ; RV64-NEXT: vle32.v v9, (a0) ; RV64-NEXT: vmv.v.x v10, a1 @@ -805,7 +805,7 @@ ; RV32-LABEL: vwsub_vx_v2i64_i32: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32-NEXT: lw a1, 0(a1) ; RV32-NEXT: vle32.v v9, (a0) ; RV32-NEXT: srai a0, a1, 31 @@ -819,7 +819,7 @@ ; ; RV64-LABEL: vwsub_vx_v2i64_i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV64-NEXT: vle32.v v9, (a0) ; RV64-NEXT: vlse32.v v10, (a1), zero ; RV64-NEXT: vwsub.vv v8, v10, v9 @@ -838,7 +838,7 @@ ; RV32-LABEL: vwsub_vx_v2i64_i64: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32-NEXT: lw a2, 4(a1) ; RV32-NEXT: lw a1, 0(a1) ; RV32-NEXT: vle32.v v9, (a0) @@ -852,7 +852,7 @@ ; ; RV64-LABEL: vwsub_vx_v2i64_i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, 
e32, mf2, ta, ma ; RV64-NEXT: vle32.v v9, (a0) ; RV64-NEXT: vlse64.v v8, (a1), zero ; RV64-NEXT: vwsub.wv v8, v8, v9 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll @@ -5,7 +5,7 @@ define <2 x i16> @vwsubu_v2i16(<2 x i8>* %x, <2 x i8>* %y) { ; CHECK-LABEL: vwsubu_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vle8.v v10, (a1) ; CHECK-NEXT: vwsubu.vv v8, v9, v10 @@ -21,7 +21,7 @@ define <4 x i16> @vwsubu_v4i16(<4 x i8>* %x, <4 x i8>* %y) { ; CHECK-LABEL: vwsubu_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vle8.v v10, (a1) ; CHECK-NEXT: vwsubu.vv v8, v9, v10 @@ -37,7 +37,7 @@ define <2 x i32> @vwsubu_v2i32(<2 x i16>* %x, <2 x i16>* %y) { ; CHECK-LABEL: vwsubu_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vle16.v v10, (a1) ; CHECK-NEXT: vwsubu.vv v8, v9, v10 @@ -53,7 +53,7 @@ define <8 x i16> @vwsubu_v8i16(<8 x i8>* %x, <8 x i8>* %y) { ; CHECK-LABEL: vwsubu_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vle8.v v10, (a1) ; CHECK-NEXT: vwsubu.vv v8, v9, v10 @@ -69,7 +69,7 @@ define <4 x i32> @vwsubu_v4i32(<4 x i16>* %x, <4 x i16>* %y) { ; CHECK-LABEL: vwsubu_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vle16.v v10, (a1) ; CHECK-NEXT: vwsubu.vv v8, v9, v10 @@ -85,7 +85,7 @@ define <2 x i64> @vwsubu_v2i64(<2 x i32>* %x, <2 x i32>* %y) { ; CHECK-LABEL: vwsubu_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vle32.v v10, (a1) ; CHECK-NEXT: vwsubu.vv v8, v9, v10 @@ -101,7 +101,7 @@ define <16 x i16> @vwsubu_v16i16(<16 x i8>* %x, <16 x i8>* %y) { ; CHECK-LABEL: vwsubu_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v10, (a0) ; CHECK-NEXT: vle8.v v11, (a1) ; CHECK-NEXT: vwsubu.vv v8, v10, v11 @@ -117,7 +117,7 @@ define <8 x i32> @vwsubu_v8i32(<8 x i16>* %x, <8 x i16>* %y) { ; CHECK-LABEL: vwsubu_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v10, (a0) ; CHECK-NEXT: vle16.v v11, (a1) ; CHECK-NEXT: vwsubu.vv v8, v10, v11 @@ -133,7 +133,7 @@ define <4 x i64> @vwsubu_v4i64(<4 x i32>* %x, <4 x i32>* %y) { ; CHECK-LABEL: vwsubu_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v10, (a0) ; CHECK-NEXT: vle32.v v11, (a1) ; CHECK-NEXT: vwsubu.vv v8, v10, v11 @@ -150,7 +150,7 @@ ; CHECK-LABEL: vwsubu_v32i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vle8.v v12, (a0) ; CHECK-NEXT: vle8.v v14, (a1) ; CHECK-NEXT: 
vwsubu.vv v8, v12, v14 @@ -166,7 +166,7 @@ define <16 x i32> @vwsubu_v16i32(<16 x i16>* %x, <16 x i16>* %y) { ; CHECK-LABEL: vwsubu_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v12, (a0) ; CHECK-NEXT: vle16.v v14, (a1) ; CHECK-NEXT: vwsubu.vv v8, v12, v14 @@ -182,7 +182,7 @@ define <8 x i64> @vwsubu_v8i64(<8 x i32>* %x, <8 x i32>* %y) { ; CHECK-LABEL: vwsubu_v8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v12, (a0) ; CHECK-NEXT: vle32.v v14, (a1) ; CHECK-NEXT: vwsubu.vv v8, v12, v14 @@ -199,7 +199,7 @@ ; CHECK-LABEL: vwsubu_v64i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 64 -; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vle8.v v20, (a1) ; CHECK-NEXT: vwsubu.vv v8, v16, v20 @@ -216,7 +216,7 @@ ; CHECK-LABEL: vwsubu_v32i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vle16.v v20, (a1) ; CHECK-NEXT: vwsubu.vv v8, v16, v20 @@ -232,7 +232,7 @@ define <16 x i64> @vwsubu_v16i64(<16 x i32>* %x, <16 x i32>* %y) { ; CHECK-LABEL: vwsubu_v16i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vle32.v v20, (a1) ; CHECK-NEXT: vwsubu.vv v8, v16, v20 @@ -253,16 +253,16 @@ ; CHECK-NEXT: slli a2, a2, 3 ; CHECK-NEXT: sub sp, sp, a2 ; CHECK-NEXT: li a2, 128 -; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vle8.v v24, (a1) ; CHECK-NEXT: li a0, 64 -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v16, a0 ; CHECK-NEXT: addi a1, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: vslidedown.vx v0, v24, a0 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwsubu.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload @@ -288,16 +288,16 @@ ; CHECK-NEXT: slli a2, a2, 3 ; CHECK-NEXT: sub sp, sp, a2 ; CHECK-NEXT: li a2, 64 -; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vle16.v v24, (a1) ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v16, a0 ; CHECK-NEXT: addi a1, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: vslidedown.vx v0, v24, a0 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwsubu.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload @@ -323,15 +323,15 @@ ; CHECK-NEXT: slli a2, a2, 3 ; CHECK-NEXT: sub sp, sp, a2 ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vle32.v v24, (a1) -; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; CHECK-NEXT: vslidedown.vi 
v8, v16, 16 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill ; CHECK-NEXT: vslidedown.vi v0, v24, 16 -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vwsubu.vv v8, v16, v24 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload @@ -352,7 +352,7 @@ define <2 x i32> @vwsubu_v2i32_v2i8(<2 x i8>* %x, <2 x i8>* %y) { ; CHECK-LABEL: vwsubu_v2i32_v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle8.v v8, (a1) ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vzext.vf2 v10, v8 @@ -370,7 +370,7 @@ define <4 x i32> @vwsubu_v4i32_v4i8_v4i16(<4 x i8>* %x, <4 x i16>* %y) { ; CHECK-LABEL: vwsubu_v4i32_v4i8_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: vzext.vf2 v10, v8 @@ -387,7 +387,7 @@ define <4 x i64> @vwsubu_v4i64_v4i32_v4i8(<4 x i32>* %x, <4 x i8>* %y) { ; CHECK-LABEL: vwsubu_v4i64_v4i32_v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a1) ; CHECK-NEXT: vle32.v v10, (a0) ; CHECK-NEXT: vzext.vf4 v11, v8 @@ -404,7 +404,7 @@ define <2 x i16> @vwsubu_vx_v2i16(<2 x i8>* %x, i8 %y) { ; CHECK-LABEL: vwsubu_vx_v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vwsubu.vx v8, v9, a1 ; CHECK-NEXT: ret @@ -420,7 +420,7 @@ define <4 x i16> @vwsubu_vx_v4i16(<4 x i8>* %x, i8 %y) { ; CHECK-LABEL: vwsubu_vx_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vwsubu.vx v8, v9, a1 ; CHECK-NEXT: ret @@ -436,7 +436,7 @@ define <2 x i32> @vwsubu_vx_v2i32(<2 x i16>* %x, i16 %y) { ; CHECK-LABEL: vwsubu_vx_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vwsubu.vx v8, v9, a1 ; CHECK-NEXT: ret @@ -452,7 +452,7 @@ define <8 x i16> @vwsubu_vx_v8i16(<8 x i8>* %x, i8 %y) { ; CHECK-LABEL: vwsubu_vx_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vwsubu.vx v8, v9, a1 ; CHECK-NEXT: ret @@ -468,7 +468,7 @@ define <4 x i32> @vwsubu_vx_v4i32(<4 x i16>* %x, i16 %y) { ; CHECK-LABEL: vwsubu_vx_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vwsubu.vx v8, v9, a1 ; CHECK-NEXT: ret @@ -484,7 +484,7 @@ define <2 x i64> @vwsubu_vx_v2i64(<2 x i32>* %x, i32 %y) { ; CHECK-LABEL: vwsubu_vx_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vwsubu.vx v8, v9, a1 ; CHECK-NEXT: ret @@ -500,7 +500,7 @@ define <16 x i16> @vwsubu_vx_v16i16(<16 x i8>* %x, i8 %y) { ; CHECK-LABEL: vwsubu_vx_v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v10, (a0) ; CHECK-NEXT: vwsubu.vx v8, v10, a1 ; 
CHECK-NEXT: ret @@ -516,7 +516,7 @@ define <8 x i32> @vwsubu_vx_v8i32(<8 x i16>* %x, i16 %y) { ; CHECK-LABEL: vwsubu_vx_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v10, (a0) ; CHECK-NEXT: vwsubu.vx v8, v10, a1 ; CHECK-NEXT: ret @@ -532,7 +532,7 @@ define <4 x i64> @vwsubu_vx_v4i64(<4 x i32>* %x, i32 %y) { ; CHECK-LABEL: vwsubu_vx_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v10, (a0) ; CHECK-NEXT: vwsubu.vx v8, v10, a1 ; CHECK-NEXT: ret @@ -549,7 +549,7 @@ ; CHECK-LABEL: vwsubu_vx_v32i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vle8.v v12, (a0) ; CHECK-NEXT: vwsubu.vx v8, v12, a1 ; CHECK-NEXT: ret @@ -565,7 +565,7 @@ define <16 x i32> @vwsubu_vx_v16i32(<16 x i16>* %x, i16 %y) { ; CHECK-LABEL: vwsubu_vx_v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v12, (a0) ; CHECK-NEXT: vwsubu.vx v8, v12, a1 ; CHECK-NEXT: ret @@ -581,7 +581,7 @@ define <8 x i64> @vwsubu_vx_v8i64(<8 x i32>* %x, i32 %y) { ; CHECK-LABEL: vwsubu_vx_v8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v12, (a0) ; CHECK-NEXT: vwsubu.vx v8, v12, a1 ; CHECK-NEXT: ret @@ -598,7 +598,7 @@ ; CHECK-LABEL: vwsubu_vx_v64i16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 64 -; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma ; CHECK-NEXT: vle8.v v16, (a0) ; CHECK-NEXT: vwsubu.vx v8, v16, a1 ; CHECK-NEXT: ret @@ -615,7 +615,7 @@ ; CHECK-LABEL: vwsubu_vx_v32i32: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v16, (a0) ; CHECK-NEXT: vwsubu.vx v8, v16, a1 ; CHECK-NEXT: ret @@ -631,7 +631,7 @@ define <16 x i64> @vwsubu_vx_v16i64(<16 x i32>* %x, i32 %y) { ; CHECK-LABEL: vwsubu_vx_v16i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v16, (a0) ; CHECK-NEXT: vwsubu.vx v8, v16, a1 ; CHECK-NEXT: ret @@ -647,7 +647,7 @@ define <8 x i16> @vwsubu_vx_v8i16_i8(<8 x i8>* %x, i8* %y) { ; CHECK-LABEL: vwsubu_vx_v8i16_i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vlse8.v v10, (a1), zero ; CHECK-NEXT: vwsubu.vv v8, v10, v9 @@ -665,7 +665,7 @@ define <8 x i16> @vwsubu_vx_v8i16_i16(<8 x i8>* %x, i16* %y) { ; CHECK-LABEL: vwsubu_vx_v8i16_i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v9, (a0) ; CHECK-NEXT: vlse16.v v8, (a1), zero ; CHECK-NEXT: vwsubu.wv v8, v8, v9 @@ -682,7 +682,7 @@ define <4 x i32> @vwsubu_vx_v4i32_i8(<4 x i16>* %x, i8* %y) { ; CHECK-LABEL: vwsubu_vx_v4i32_i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: lbu a1, 0(a1) ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vmv.v.x v10, a1 @@ -701,7 +701,7 @@ define <4 x i32> @vwsubu_vx_v4i32_i16(<4 x i16>* %x, i16* %y) { ; CHECK-LABEL: vwsubu_vx_v4i32_i16: ; CHECK: # 
%bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vlse16.v v10, (a1), zero ; CHECK-NEXT: vwsubu.vv v8, v10, v9 @@ -719,7 +719,7 @@ define <4 x i32> @vwsubu_vx_v4i32_i32(<4 x i16>* %x, i32* %y) { ; CHECK-LABEL: vwsubu_vx_v4i32_i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v9, (a0) ; CHECK-NEXT: vlse32.v v8, (a1), zero ; CHECK-NEXT: vwsubu.wv v8, v8, v9 @@ -737,7 +737,7 @@ ; RV32-LABEL: vwsubu_vx_v2i64_i8: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32-NEXT: lbu a1, 0(a1) ; RV32-NEXT: vle32.v v9, (a0) ; RV32-NEXT: sw zero, 12(sp) @@ -750,7 +750,7 @@ ; ; RV64-LABEL: vwsubu_vx_v2i64_i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV64-NEXT: lbu a1, 0(a1) ; RV64-NEXT: vle32.v v9, (a0) ; RV64-NEXT: vmv.v.x v10, a1 @@ -770,7 +770,7 @@ ; RV32-LABEL: vwsubu_vx_v2i64_i16: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32-NEXT: lhu a1, 0(a1) ; RV32-NEXT: vle32.v v9, (a0) ; RV32-NEXT: sw zero, 12(sp) @@ -783,7 +783,7 @@ ; ; RV64-LABEL: vwsubu_vx_v2i64_i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV64-NEXT: lhu a1, 0(a1) ; RV64-NEXT: vle32.v v9, (a0) ; RV64-NEXT: vmv.v.x v10, a1 @@ -803,7 +803,7 @@ ; RV32-LABEL: vwsubu_vx_v2i64_i32: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32-NEXT: lw a1, 0(a1) ; RV32-NEXT: vle32.v v9, (a0) ; RV32-NEXT: sw zero, 12(sp) @@ -816,7 +816,7 @@ ; ; RV64-LABEL: vwsubu_vx_v2i64_i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV64-NEXT: vle32.v v9, (a0) ; RV64-NEXT: vlse32.v v10, (a1), zero ; RV64-NEXT: vwsubu.vv v8, v10, v9 @@ -835,7 +835,7 @@ ; RV32-LABEL: vwsubu_vx_v2i64_i64: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32-NEXT: lw a2, 4(a1) ; RV32-NEXT: lw a1, 0(a1) ; RV32-NEXT: vle32.v v9, (a0) @@ -849,7 +849,7 @@ ; ; RV64-LABEL: vwsubu_vx_v2i64_i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV64-NEXT: vle32.v v9, (a0) ; RV64-NEXT: vlse64.v v8, (a1), zero ; RV64-NEXT: vwsubu.wv v8, v8, v9 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll @@ -31,7 +31,7 @@ define <2 x i8> @vxor_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -67,7 +67,7 @@ define <2 x i8> @vxor_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_v2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, 
ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0 @@ -93,7 +93,7 @@ define <2 x i8> @vxor_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 7, i32 0 @@ -119,7 +119,7 @@ define <2 x i8> @vxor_vi_v2i8_unmasked_1(<2 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v2i8_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i8> poison, i8 -1, i32 0 @@ -145,7 +145,7 @@ define <4 x i8> @vxor_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -169,7 +169,7 @@ define <4 x i8> @vxor_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0 @@ -195,7 +195,7 @@ define <4 x i8> @vxor_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 7, i32 0 @@ -221,7 +221,7 @@ define <4 x i8> @vxor_vi_v4i8_unmasked_1(<4 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v4i8_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i8> poison, i8 -1, i32 0 @@ -247,7 +247,7 @@ define <8 x i8> @vxor_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -271,7 +271,7 @@ define <8 x i8> @vxor_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_v8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 @@ -297,7 +297,7 @@ define <8 x i8> @vxor_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 7, i32 0 @@ -323,7 +323,7 @@ define <8 x i8> @vxor_vi_v8i8_unmasked_1(<8 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v8i8_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli 
zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i8> poison, i8 -1, i32 0 @@ -349,7 +349,7 @@ define <9 x i8> @vxor_vv_v9i8_unmasked(<9 x i8> %va, <9 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v9i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <9 x i1> poison, i1 true, i32 0 @@ -373,7 +373,7 @@ define <9 x i8> @vxor_vx_v9i8_unmasked(<9 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_v9i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <9 x i8> poison, i8 %b, i32 0 @@ -399,7 +399,7 @@ define <9 x i8> @vxor_vi_v9i8_unmasked(<9 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v9i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement <9 x i8> poison, i8 7, i32 0 @@ -425,7 +425,7 @@ define <9 x i8> @vxor_vi_v9i8_unmasked_1(<9 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v9i8_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <9 x i8> poison, i8 -1, i32 0 @@ -451,7 +451,7 @@ define <16 x i8> @vxor_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -475,7 +475,7 @@ define <16 x i8> @vxor_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_v16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0 @@ -501,7 +501,7 @@ define <16 x i8> @vxor_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 7, i32 0 @@ -527,7 +527,7 @@ define <16 x i8> @vxor_vi_v16i8_unmasked_1(<16 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v16i8_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i8> poison, i8 -1, i32 0 @@ -553,7 +553,7 @@ define <2 x i16> @vxor_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -577,7 +577,7 @@ define <2 x i16> @vxor_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: 
vxor_vx_v2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0 @@ -603,7 +603,7 @@ define <2 x i16> @vxor_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 7, i32 0 @@ -629,7 +629,7 @@ define <2 x i16> @vxor_vi_v2i16_unmasked_1(<2 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v2i16_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i16> poison, i16 -1, i32 0 @@ -655,7 +655,7 @@ define <4 x i16> @vxor_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -679,7 +679,7 @@ define <4 x i16> @vxor_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0 @@ -705,7 +705,7 @@ define <4 x i16> @vxor_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 7, i32 0 @@ -731,7 +731,7 @@ define <4 x i16> @vxor_vi_v4i16_unmasked_1(<4 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v4i16_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i16> poison, i16 -1, i32 0 @@ -757,7 +757,7 @@ define <8 x i16> @vxor_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -781,7 +781,7 @@ define <8 x i16> @vxor_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_v8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0 @@ -807,7 +807,7 @@ define <8 x i16> @vxor_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 7, i32 0 @@ -833,7 +833,7 @@ 
define <8 x i16> @vxor_vi_v8i16_unmasked_1(<8 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v8i16_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i16> poison, i16 -1, i32 0 @@ -859,7 +859,7 @@ define <16 x i16> @vxor_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -883,7 +883,7 @@ define <16 x i16> @vxor_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_v16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0 @@ -909,7 +909,7 @@ define <16 x i16> @vxor_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 7, i32 0 @@ -935,7 +935,7 @@ define <16 x i16> @vxor_vi_v16i16_unmasked_1(<16 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v16i16_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i16> poison, i16 -1, i32 0 @@ -961,7 +961,7 @@ define <2 x i32> @vxor_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -985,7 +985,7 @@ define <2 x i32> @vxor_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_v2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0 @@ -1011,7 +1011,7 @@ define <2 x i32> @vxor_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 7, i32 0 @@ -1037,7 +1037,7 @@ define <2 x i32> @vxor_vi_v2i32_unmasked_1(<2 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v2i32_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i32> poison, i32 -1, i32 0 @@ -1063,7 +1063,7 @@ define <4 x i32> @vxor_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; 
CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -1087,7 +1087,7 @@ define <4 x i32> @vxor_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0 @@ -1113,7 +1113,7 @@ define <4 x i32> @vxor_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 7, i32 0 @@ -1139,7 +1139,7 @@ define <4 x i32> @vxor_vi_v4i32_unmasked_1(<4 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v4i32_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i32> poison, i32 -1, i32 0 @@ -1165,7 +1165,7 @@ define <8 x i32> @vxor_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -1189,7 +1189,7 @@ define <8 x i32> @vxor_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_v8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 @@ -1215,7 +1215,7 @@ define <8 x i32> @vxor_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 7, i32 0 @@ -1241,7 +1241,7 @@ define <8 x i32> @vxor_vi_v8i32_unmasked_1(<8 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v8i32_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i32> poison, i32 -1, i32 0 @@ -1267,7 +1267,7 @@ define <16 x i32> @vxor_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -1291,7 +1291,7 @@ define <16 x i32> @vxor_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_v16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0 @@ -1317,7 +1317,7 @@ define <16 x i32> @vxor_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v16i32_unmasked: ; 
CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 7, i32 0 @@ -1343,7 +1343,7 @@ define <16 x i32> @vxor_vi_v16i32_unmasked_1(<16 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v16i32_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i32> poison, i32 -1, i32 0 @@ -1369,7 +1369,7 @@ define <2 x i64> @vxor_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement <2 x i1> poison, i1 true, i32 0 @@ -1386,7 +1386,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v9, v0.t @@ -1412,16 +1412,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vxor.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_v2i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vxor.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0 @@ -1447,7 +1447,7 @@ define <2 x i64> @vxor_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 7, i32 0 @@ -1473,7 +1473,7 @@ define <2 x i64> @vxor_vi_v2i64_unmasked_1(<2 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v2i64_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <2 x i64> poison, i64 -1, i32 0 @@ -1499,7 +1499,7 @@ define <4 x i64> @vxor_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement <4 x i1> poison, i1 true, i32 0 @@ -1516,7 +1516,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v10, v0.t @@ -1542,16 +1542,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32-NEXT: vsetivli zero, 4, 
e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vxor.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_v4i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vxor.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0 @@ -1577,7 +1577,7 @@ define <4 x i64> @vxor_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 7, i32 0 @@ -1603,7 +1603,7 @@ define <4 x i64> @vxor_vi_v4i64_unmasked_1(<4 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v4i64_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <4 x i64> poison, i64 -1, i32 0 @@ -1629,7 +1629,7 @@ define <8 x i64> @vxor_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement <8 x i1> poison, i1 true, i32 0 @@ -1646,7 +1646,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v12, v0.t @@ -1672,16 +1672,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vxor.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_v8i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vxor.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 @@ -1707,7 +1707,7 @@ define <8 x i64> @vxor_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 7, i32 0 @@ -1733,7 +1733,7 @@ define <8 x i64> @vxor_vi_v8i64_unmasked_1(<8 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v8i64_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <8 x i64> poison, i64 -1, i32 0 @@ -1759,7 +1759,7 @@ define <16 x i64> @vxor_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_v16i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, 
a0, e64, m8, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement <16 x i1> poison, i1 true, i32 0 @@ -1776,7 +1776,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v16, v0.t @@ -1802,16 +1802,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vxor.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_v16i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vxor.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0 @@ -1837,7 +1837,7 @@ define <16 x i64> @vxor_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v16i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 7, i32 0 @@ -1863,7 +1863,7 @@ define <16 x i64> @vxor_vi_v16i64_unmasked_1(<16 x i64> %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_v16i64_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement <16 x i64> poison, i64 -1, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp-mask.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp-mask.ll @@ -9,7 +9,7 @@ define <4 x i16> @vzext_v4i16_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v4i16_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -20,7 +20,7 @@ define <4 x i16> @vzext_v4i16_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v4i16_v4i1_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -33,7 +33,7 @@ define <4 x i32> @vzext_v4i32_v4i1(<4 x i1> %va, <4 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v4i32_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -44,7 +44,7 @@ define <4 x i32> @vzext_v4i32_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v4i32_v4i1_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -57,7 +57,7 @@ define <4 x i64> @vzext_v4i64_v4i1(<4 x i1> %va, <4 x i1> %m, i32 
zeroext %evl) { ; CHECK-LABEL: vzext_v4i64_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -68,7 +68,7 @@ define <4 x i64> @vzext_v4i64_v4i1_unmasked(<4 x i1> %va, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v4i64_v4i1_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll @@ -20,7 +20,7 @@ define <4 x i16> @vzext_v4i16_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v4i16_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vzext.vf2 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -44,7 +44,7 @@ define <4 x i32> @vzext_v4i32_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v4i32_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vzext.vf4 v9, v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -68,7 +68,7 @@ define <4 x i64> @vzext_v4i64_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v4i64_v4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vzext.vf8 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -92,7 +92,7 @@ define <4 x i32> @vzext_v4i32_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v4i32_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vzext.vf2 v9, v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -116,7 +116,7 @@ define <4 x i64> @vzext_v4i64_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v4i64_v4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vzext.vf4 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -140,7 +140,7 @@ define <4 x i64> @vzext_v4i64_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { ; CHECK-LABEL: vzext_v4i64_v4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vzext.vf2 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -155,14 +155,14 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v1, v0 ; CHECK-NEXT: li a1, 0 -; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma ; CHECK-NEXT: addi a2, a0, -16 ; CHECK-NEXT: vslidedown.vi v0, v0, 2 ; CHECK-NEXT: bltu a0, a2, .LBB12_2 ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a1, a2 ; CHECK-NEXT: .LBB12_2: -; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v24, v8, 16 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: li a1, 16 @@ -189,16 +189,16 @@ ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a1, a2 ; CHECK-NEXT: .LBB13_2: -; CHECK-NEXT: vsetivli zero, 16, e32, m8, 
ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v24, v8, 16 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: li a1, 16 ; CHECK-NEXT: vzext.vf2 v16, v24 ; CHECK-NEXT: bltu a0, a1, .LBB13_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: li a0, 16 ; CHECK-NEXT: .LBB13_4: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vzext.vf2 v24, v8 ; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll @@ -674,7 +674,7 @@ ; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a4, a1, 3 -; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma ; CHECK-NEXT: sub a3, a0, a1 ; CHECK-NEXT: vslidedown.vx v2, v0, a4 ; CHECK-NEXT: bltu a0, a3, .LBB32_2 diff --git a/llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll b/llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll --- a/llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll @@ -4,9 +4,9 @@ define i64 @reduce_add(i64 %x, <4 x i64> %v) { ; CHECK-LABEL: reduce_add: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vredsum.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -19,9 +19,9 @@ define i64 @reduce_add2(<4 x i64> %v) { ; CHECK-LABEL: reduce_add2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vmv.v.i v10, 8 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vredsum.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -34,9 +34,9 @@ define i64 @reduce_and(i64 %x, <4 x i64> %v) { ; CHECK-LABEL: reduce_and: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vredand.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -49,9 +49,9 @@ define i64 @reduce_and2(<4 x i64> %v) { ; CHECK-LABEL: reduce_and2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vmv.v.i v10, 8 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vredand.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -64,9 +64,9 @@ define i64 @reduce_or(i64 %x, <4 x i64> %v) { ; CHECK-LABEL: reduce_or: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vredor.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -79,9 +79,9 @@ define i64 @reduce_or2(<4 x i64> %v) { ; CHECK-LABEL: reduce_or2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: 
vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vmv.v.i v10, 8 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vredor.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -94,9 +94,9 @@ define i64 @reduce_xor(i64 %x, <4 x i64> %v) { ; CHECK-LABEL: reduce_xor: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vredxor.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -109,9 +109,9 @@ define i64 @reduce_xor2(<4 x i64> %v) { ; CHECK-LABEL: reduce_xor2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vmv.s.x v10, zero -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vredxor.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: andi a0, a0, 8 @@ -125,9 +125,9 @@ define i64 @reduce_umax(i64 %x, <4 x i64> %v) { ; CHECK-LABEL: reduce_umax: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vredmaxu.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -140,9 +140,9 @@ define i64 @reduce_umax2(<4 x i64> %v) { ; CHECK-LABEL: reduce_umax2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vmv.v.i v10, 8 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vredmaxu.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -155,9 +155,9 @@ define i64 @reduce_umin(i64 %x, <4 x i64> %v) { ; CHECK-LABEL: reduce_umin: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vredminu.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -170,9 +170,9 @@ define i64 @reduce_umin2(<4 x i64> %v) { ; CHECK-LABEL: reduce_umin2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vmv.v.i v10, 8 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vredminu.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -185,9 +185,9 @@ define i64 @reduce_smax(i64 %x, <4 x i64> %v) { ; CHECK-LABEL: reduce_smax: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vredmax.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -200,9 +200,9 @@ define i64 @reduce_smax2(<4 x i64> %v) { ; CHECK-LABEL: reduce_smax2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vmv.v.i v10, 8 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, 
ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vredmax.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -215,9 +215,9 @@ define i64 @reduce_smin(i64 %x, <4 x i64> %v) { ; CHECK-LABEL: reduce_smin: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vredmin.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -230,9 +230,9 @@ define i64 @reduce_smin2(<4 x i64> %v) { ; CHECK-LABEL: reduce_smin2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vmv.v.i v10, 8 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vredmin.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -245,9 +245,9 @@ define float @reduce_fadd(float %x, <4 x float> %v) { ; CHECK-LABEL: reduce_fadd: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -259,9 +259,9 @@ define float @reduce_fadd2(float %x, <4 x float> %v) { ; CHECK-LABEL: reduce_fadd2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -274,9 +274,9 @@ define float @reduce_fmax(float %x, <4 x float> %v) { ; CHECK-LABEL: reduce_fmax: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -289,9 +289,9 @@ define float @reduce_fmin(float %x, <4 x float> %v) { ; CHECK-LABEL: reduce_fmin: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/fptosi-sat.ll b/llvm/test/CodeGen/RISCV/rvv/fptosi-sat.ll --- a/llvm/test/CodeGen/RISCV/rvv/fptosi-sat.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fptosi-sat.ll @@ -17,7 +17,7 @@ define @test_signed_v2f32_v2i32( %f) { ; CHECK-LABEL: test_signed_v2f32_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 @@ -29,7 +29,7 @@ define @test_signed_v4f32_v4i32( %f) { ; CHECK-LABEL: test_signed_v4f32_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: 
vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 @@ -41,7 +41,7 @@ define @test_signed_v8f32_v8i32( %f) { ; CHECK-LABEL: test_signed_v8f32_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 @@ -53,9 +53,9 @@ define @test_signed_v4f32_v4i16( %f) { ; CHECK-LABEL: test_signed_v4f32_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 ; CHECK-NEXT: vmerge.vim v8, v10, 0, v0 ; CHECK-NEXT: ret @@ -66,9 +66,9 @@ define @test_signed_v8f32_v8i16( %f) { ; CHECK-LABEL: test_signed_v8f32_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 ; CHECK-NEXT: vmerge.vim v8, v12, 0, v0 ; CHECK-NEXT: ret @@ -79,10 +79,10 @@ define @test_signed_v2f32_v2i64( %f) { ; CHECK-LABEL: test_signed_v2f32_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: vfwcvt.rtz.x.f.v v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v10, 0, v0 ; CHECK-NEXT: ret %x = call @llvm.fptosi.sat.nxv2f32.nxv2i64( %f) @@ -92,10 +92,10 @@ define @test_signed_v4f32_v4i64( %f) { ; CHECK-LABEL: test_signed_v4f32_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: vfwcvt.rtz.x.f.v v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; CHECK-NEXT: vmerge.vim v8, v12, 0, v0 ; CHECK-NEXT: ret %x = call @llvm.fptosi.sat.nxv4f32.nxv4i64( %f) @@ -115,9 +115,9 @@ define @test_signed_v2f64_v2i32( %f) { ; CHECK-LABEL: test_signed_v2f64_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 ; CHECK-NEXT: vmerge.vim v8, v10, 0, v0 ; CHECK-NEXT: ret @@ -128,9 +128,9 @@ define @test_signed_v4f64_v4i32( %f) { ; CHECK-LABEL: test_signed_v4f64_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 ; CHECK-NEXT: vmerge.vim v8, v12, 0, v0 ; CHECK-NEXT: ret @@ -141,9 +141,9 @@ define @test_signed_v8f64_v8i32( %f) { ; CHECK-LABEL: test_signed_v8f64_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 
; CHECK-NEXT: vmerge.vim v8, v16, 0, v0 ; CHECK-NEXT: ret @@ -158,16 +158,16 @@ ; CHECK-NEXT: fld ft0, %lo(.LCPI10_0)(a0) ; CHECK-NEXT: lui a0, %hi(.LCPI10_1) ; CHECK-NEXT: fld ft1, %lo(.LCPI10_1)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfmax.vf v12, v8, ft0 ; CHECK-NEXT: vfmin.vf v12, v12, ft1 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v12 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v12, v16, 0 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-NEXT: vmerge.vim v8, v12, 0, v0 ; CHECK-NEXT: ret %x = call @llvm.fptosi.sat.nxv4f64.nxv4i16( %f) @@ -181,16 +181,16 @@ ; CHECK-NEXT: fld ft0, %lo(.LCPI11_0)(a0) ; CHECK-NEXT: lui a0, %hi(.LCPI11_1) ; CHECK-NEXT: fld ft1, %lo(.LCPI11_1)(a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfmax.vf v16, v8, ft0 ; CHECK-NEXT: vfmin.vf v16, v16, ft1 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v24, v16 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-NEXT: vnsrl.wi v16, v24, 0 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v16, 0, v0 ; CHECK-NEXT: ret %x = call @llvm.fptosi.sat.nxv8f64.nxv8i16( %f) @@ -200,7 +200,7 @@ define @test_signed_v2f64_v2i64( %f) { ; CHECK-LABEL: test_signed_v2f64_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 @@ -212,7 +212,7 @@ define @test_signed_v4f64_v4i64( %f) { ; CHECK-LABEL: test_signed_v4f64_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 @@ -235,10 +235,10 @@ define @test_signed_v2f16_v2i32( %f) { ; CHECK-LABEL: test_signed_v2f16_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: vfwcvt.rtz.x.f.v v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vmerge.vim v8, v9, 0, v0 ; CHECK-NEXT: ret %x = call @llvm.fptosi.sat.nxv2f16.nxv2i32( %f) @@ -248,10 +248,10 @@ define @test_signed_v4f16_v4i32( %f) { ; CHECK-LABEL: test_signed_v4f16_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: vfwcvt.rtz.x.f.v v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v10, 0, v0 ; CHECK-NEXT: ret %x = call 
@llvm.fptosi.sat.nxv4f16.nxv4i32( %f) @@ -261,10 +261,10 @@ define @test_signed_v8f16_v8i32( %f) { ; CHECK-LABEL: test_signed_v8f16_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: vfwcvt.rtz.x.f.v v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-NEXT: vmerge.vim v8, v12, 0, v0 ; CHECK-NEXT: ret %x = call @llvm.fptosi.sat.nxv8f16.nxv8i32( %f) @@ -274,7 +274,7 @@ define @test_signed_v4f16_v4i16( %f) { ; CHECK-LABEL: test_signed_v4f16_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 @@ -286,7 +286,7 @@ define @test_signed_v8f16_v8i16( %f) { ; CHECK-LABEL: test_signed_v8f16_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 @@ -298,12 +298,12 @@ define @test_signed_v2f16_v2i64( %f) { ; CHECK-LABEL: test_signed_v2f16_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: vfwcvt.f.f.v v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.rtz.x.f.v v10, v9 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v10, 0, v0 ; CHECK-NEXT: ret %x = call @llvm.fptosi.sat.nxv2f16.nxv2i64( %f) @@ -313,12 +313,12 @@ define @test_signed_v4f16_v4i64( %f) { ; CHECK-LABEL: test_signed_v4f16_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: vfwcvt.f.f.v v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwcvt.rtz.x.f.v v12, v10 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; CHECK-NEXT: vmerge.vim v8, v12, 0, v0 ; CHECK-NEXT: ret %x = call @llvm.fptosi.sat.nxv4f16.nxv4i64( %f) diff --git a/llvm/test/CodeGen/RISCV/rvv/fptoui-sat.ll b/llvm/test/CodeGen/RISCV/rvv/fptoui-sat.ll --- a/llvm/test/CodeGen/RISCV/rvv/fptoui-sat.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fptoui-sat.ll @@ -17,7 +17,7 @@ define @test_signed_v2f32_v2i32( %f) { ; CHECK-LABEL: test_signed_v2f32_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 @@ -29,7 +29,7 @@ define @test_signed_v4f32_v4i32( %f) { ; CHECK-LABEL: test_signed_v4f32_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 @@ -41,7 +41,7 @@ define @test_signed_v8f32_v8i32( %f) { ; CHECK-LABEL: test_signed_v8f32_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; 
CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 @@ -53,9 +53,9 @@ define @test_signed_v4f32_v4i16( %f) { ; CHECK-LABEL: test_signed_v4f32_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 ; CHECK-NEXT: vmerge.vim v8, v10, 0, v0 ; CHECK-NEXT: ret @@ -66,9 +66,9 @@ define @test_signed_v8f32_v8i16( %f) { ; CHECK-LABEL: test_signed_v8f32_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 ; CHECK-NEXT: vmerge.vim v8, v12, 0, v0 ; CHECK-NEXT: ret @@ -79,10 +79,10 @@ define @test_signed_v2f32_v2i64( %f) { ; CHECK-LABEL: test_signed_v2f32_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v10, 0, v0 ; CHECK-NEXT: ret %x = call @llvm.fptoui.sat.nxv2f32.nxv2i64( %f) @@ -92,10 +92,10 @@ define @test_signed_v4f32_v4i64( %f) { ; CHECK-LABEL: test_signed_v4f32_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; CHECK-NEXT: vmerge.vim v8, v12, 0, v0 ; CHECK-NEXT: ret %x = call @llvm.fptoui.sat.nxv4f32.nxv4i64( %f) @@ -115,9 +115,9 @@ define @test_signed_v2f64_v2i32( %f) { ; CHECK-LABEL: test_signed_v2f64_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 ; CHECK-NEXT: vmerge.vim v8, v10, 0, v0 ; CHECK-NEXT: ret @@ -128,9 +128,9 @@ define @test_signed_v4f64_v4i32( %f) { ; CHECK-LABEL: test_signed_v4f64_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 ; CHECK-NEXT: vmerge.vim v8, v12, 0, v0 ; CHECK-NEXT: ret @@ -141,9 +141,9 @@ define @test_signed_v8f64_v8i32( %f) { ; CHECK-LABEL: test_signed_v8f64_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 ; CHECK-NEXT: vmerge.vim v8, v16, 0, v0 ; CHECK-NEXT: ret @@ -157,12 +157,12 @@ ; CHECK32-NEXT: lui a0, %hi(.LCPI10_0) ; CHECK32-NEXT: fld ft0, %lo(.LCPI10_0)(a0) ; CHECK32-NEXT: fcvt.d.w ft1, zero -; CHECK32-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK32-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; 
CHECK32-NEXT: vfmax.vf v8, v8, ft1 ; CHECK32-NEXT: vfmin.vf v8, v8, ft0 -; CHECK32-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK32-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; CHECK32-NEXT: vfncvt.rtz.xu.f.w v12, v8 -; CHECK32-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK32-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK32-NEXT: vnsrl.wi v8, v12, 0 ; CHECK32-NEXT: ret ; @@ -171,12 +171,12 @@ ; CHECK64-NEXT: lui a0, %hi(.LCPI10_0) ; CHECK64-NEXT: fld ft0, %lo(.LCPI10_0)(a0) ; CHECK64-NEXT: fmv.d.x ft1, zero -; CHECK64-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK64-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK64-NEXT: vfmax.vf v8, v8, ft1 ; CHECK64-NEXT: vfmin.vf v8, v8, ft0 -; CHECK64-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK64-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; CHECK64-NEXT: vfncvt.rtz.xu.f.w v12, v8 -; CHECK64-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK64-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK64-NEXT: vnsrl.wi v8, v12, 0 ; CHECK64-NEXT: ret %x = call @llvm.fptoui.sat.nxv4f64.nxv4i16( %f) @@ -189,12 +189,12 @@ ; CHECK32-NEXT: lui a0, %hi(.LCPI11_0) ; CHECK32-NEXT: fld ft0, %lo(.LCPI11_0)(a0) ; CHECK32-NEXT: fcvt.d.w ft1, zero -; CHECK32-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK32-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK32-NEXT: vfmax.vf v8, v8, ft1 ; CHECK32-NEXT: vfmin.vf v8, v8, ft0 -; CHECK32-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK32-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK32-NEXT: vfncvt.rtz.xu.f.w v16, v8 -; CHECK32-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK32-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK32-NEXT: vnsrl.wi v8, v16, 0 ; CHECK32-NEXT: ret ; @@ -203,12 +203,12 @@ ; CHECK64-NEXT: lui a0, %hi(.LCPI11_0) ; CHECK64-NEXT: fld ft0, %lo(.LCPI11_0)(a0) ; CHECK64-NEXT: fmv.d.x ft1, zero -; CHECK64-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK64-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK64-NEXT: vfmax.vf v8, v8, ft1 ; CHECK64-NEXT: vfmin.vf v8, v8, ft0 -; CHECK64-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK64-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK64-NEXT: vfncvt.rtz.xu.f.w v16, v8 -; CHECK64-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK64-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK64-NEXT: vnsrl.wi v8, v16, 0 ; CHECK64-NEXT: ret %x = call @llvm.fptoui.sat.nxv8f64.nxv8i16( %f) @@ -218,7 +218,7 @@ define @test_signed_v2f64_v2i64( %f) { ; CHECK-LABEL: test_signed_v2f64_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 @@ -230,7 +230,7 @@ define @test_signed_v4f64_v4i64( %f) { ; CHECK-LABEL: test_signed_v4f64_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 @@ -253,10 +253,10 @@ define @test_signed_v2f16_v2i32( %f) { ; CHECK-LABEL: test_signed_v2f16_v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vmerge.vim v8, v9, 0, v0 ; CHECK-NEXT: ret %x = call @llvm.fptoui.sat.nxv2f16.nxv2i32( %f) @@ -266,10 +266,10 @@ define 
@test_signed_v4f16_v4i32( %f) { ; CHECK-LABEL: test_signed_v4f16_v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v10, 0, v0 ; CHECK-NEXT: ret %x = call @llvm.fptoui.sat.nxv4f16.nxv4i32( %f) @@ -279,10 +279,10 @@ define @test_signed_v8f16_v8i32( %f) { ; CHECK-LABEL: test_signed_v8f16_v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-NEXT: vmerge.vim v8, v12, 0, v0 ; CHECK-NEXT: ret %x = call @llvm.fptoui.sat.nxv8f16.nxv8i32( %f) @@ -292,7 +292,7 @@ define @test_signed_v4f16_v4i16( %f) { ; CHECK-LABEL: test_signed_v4f16_v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 @@ -304,7 +304,7 @@ define @test_signed_v8f16_v8i16( %f) { ; CHECK-LABEL: test_signed_v8f16_v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 @@ -316,12 +316,12 @@ define @test_signed_v2f16_v2i64( %f) { ; CHECK-LABEL: test_signed_v2f16_v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: vfwcvt.f.f.v v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v10, v9 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v10, 0, v0 ; CHECK-NEXT: ret %x = call @llvm.fptoui.sat.nxv2f16.nxv2i64( %f) @@ -331,12 +331,12 @@ define @test_signed_v4f16_v4i64( %f) { ; CHECK-LABEL: test_signed_v4f16_v4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v8 ; CHECK-NEXT: vfwcvt.f.f.v v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v12, v10 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; CHECK-NEXT: vmerge.vim v8, v12, 0, v0 ; CHECK-NEXT: ret %x = call @llvm.fptoui.sat.nxv4f16.nxv4i64( %f) diff --git a/llvm/test/CodeGen/RISCV/rvv/fshr-fshl.ll b/llvm/test/CodeGen/RISCV/rvv/fshr-fshl.ll --- a/llvm/test/CodeGen/RISCV/rvv/fshr-fshl.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fshr-fshl.ll @@ -6,7 +6,7 @@ ; CHECK-LABEL: fshr: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 31 -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vand.vx v11, v10, a0 ; CHECK-NEXT: vsrl.vv v9, v9, v11 ; CHECK-NEXT: vnot.v v10, v10 @@ -23,7 +23,7 @@ ; CHECK-LABEL: fshl: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 31 -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; 
CHECK-NEXT: vand.vx v11, v10, a0 ; CHECK-NEXT: vsll.vv v8, v8, v11 ; CHECK-NEXT: vnot.v v10, v10 diff --git a/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll --- a/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll @@ -61,7 +61,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 0 ; CHECK-NEXT: ret %v = call @llvm.vector.insert.nxv1i8.nxv4i8( %vec, %subvec, i64 0) @@ -76,7 +76,7 @@ ; CHECK-NEXT: slli a1, a0, 1 ; CHECK-NEXT: add a1, a1, a0 ; CHECK-NEXT: add a0, a1, a0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %v = call @llvm.vector.insert.nxv1i8.nxv4i8( %vec, %subvec, i64 3) @@ -214,7 +214,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vslideup.vi v8, v16, 0 ; CHECK-NEXT: ret %v = call @llvm.vector.insert.nxv1i32.nxv16i32( %vec, %subvec, i64 0) @@ -227,7 +227,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: add a1, a0, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %v = call @llvm.vector.insert.nxv1i32.nxv16i32( %vec, %subvec, i64 1) @@ -239,7 +239,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vslideup.vi v11, v16, 0 ; CHECK-NEXT: ret %v = call @llvm.vector.insert.nxv1i32.nxv16i32( %vec, %subvec, i64 6) @@ -251,7 +251,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 0 ; CHECK-NEXT: ret %v = call @llvm.vector.insert.nxv1i8.nxv16i8( %vec, %subvec, i64 0) @@ -264,7 +264,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: add a1, a0, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %v = call @llvm.vector.insert.nxv1i8.nxv16i8( %vec, %subvec, i64 1) @@ -278,7 +278,7 @@ ; CHECK-NEXT: srli a1, a0, 3 ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: add a1, a0, a1 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %v = call @llvm.vector.insert.nxv1i8.nxv16i8( %vec, %subvec, i64 2) @@ -293,7 +293,7 @@ ; CHECK-NEXT: slli a1, a0, 1 ; CHECK-NEXT: add a1, a1, a0 ; CHECK-NEXT: add a0, a1, a0 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vslideup.vx v8, v10, a1 ; CHECK-NEXT: ret %v = call @llvm.vector.insert.nxv1i8.nxv16i8( %vec, %subvec, i64 3) @@ -306,7 +306,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a1, a0, 3 ; CHECK-NEXT: sub a1, a0, a1 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vslideup.vx v8, v10, a1 ; CHECK-NEXT: ret %v = call @llvm.vector.insert.nxv1i8.nxv16i8( %vec, %subvec, i64 7) @@ 
-319,7 +319,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a1, a0, 3 ; CHECK-NEXT: sub a1, a0, a1 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vslideup.vx v9, v10, a1 ; CHECK-NEXT: ret %v = call @llvm.vector.insert.nxv1i8.nxv16i8( %vec, %subvec, i64 15) @@ -331,7 +331,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vslideup.vi v8, v16, 0 ; CHECK-NEXT: ret %v = call @llvm.vector.insert.nxv2f16.nxv32f16( %vec, %subvec, i64 0) @@ -344,7 +344,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: add a1, a0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %v = call @llvm.vector.insert.nxv2f16.nxv32f16( %vec, %subvec, i64 2) @@ -357,7 +357,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: add a1, a0, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; CHECK-NEXT: vslideup.vx v14, v16, a0 ; CHECK-NEXT: ret %v = call @llvm.vector.insert.nxv2f16.nxv32f16( %vec, %subvec, i64 26) @@ -380,7 +380,7 @@ ; CHECK-NEXT: srli a1, a0, 3 ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: add a1, a0, a1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; CHECK-NEXT: vslideup.vx v22, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -393,7 +393,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v0, v8, 0 ; CHECK-NEXT: ret %vec = call @llvm.vector.insert.nxv8i1.nxv32i1( %v, %sv, i64 0) @@ -406,7 +406,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: add a1, a0, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vx v0, v8, a0 ; CHECK-NEXT: ret %vec = call @llvm.vector.insert.nxv8i1.nxv32i1( %v, %sv, i64 8) @@ -416,18 +416,18 @@ define @insert_nxv4i1_nxv1i1_0( %v, %sv) { ; CHECK-LABEL: insert_nxv4i1_nxv1i1_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmerge.vim v8, v10, 1, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 0 -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v0, v9, 0 ; CHECK-NEXT: ret %vec = call @llvm.vector.insert.nxv1i1.nxv4i1( %v, %sv, i64 0) @@ -437,20 +437,20 @@ define @insert_nxv4i1_nxv1i1_2( %v, %sv) { ; CHECK-LABEL: insert_nxv4i1_nxv1i1_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a1, a0, 3 ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: add a1, a0, a1 -; CHECK-NEXT: vsetvli a2, 
zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmerge.vim v8, v10, 1, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vx v9, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v0, v9, 0 ; CHECK-NEXT: ret %vec = call @llvm.vector.insert.nxv1i1.nxv4i1( %v, %sv, i64 2) diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-fp.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-fp.ll --- a/llvm/test/CodeGen/RISCV/rvv/insertelt-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-fp.ll @@ -7,7 +7,7 @@ define @insertelt_nxv1f16_0( %v, half %elt) { ; CHECK-LABEL: insertelt_nxv1f16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, tu, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 0 @@ -17,9 +17,9 @@ define @insertelt_nxv1f16_imm( %v, half %elt) { ; CHECK-LABEL: insertelt_nxv1f16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 4, e16, mf4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 @@ -29,10 +29,10 @@ define @insertelt_nxv1f16_idx( %v, half %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv1f16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx @@ -42,7 +42,7 @@ define @insertelt_nxv2f16_0( %v, half %elt) { ; CHECK-LABEL: insertelt_nxv2f16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, tu, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 0 @@ -52,9 +52,9 @@ define @insertelt_nxv2f16_imm( %v, half %elt) { ; CHECK-LABEL: insertelt_nxv2f16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 @@ -64,10 +64,10 @@ define @insertelt_nxv2f16_idx( %v, half %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv2f16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx @@ -77,7 +77,7 @@ define @insertelt_nxv4f16_0( %v, half %elt) { ; CHECK-LABEL: insertelt_nxv4f16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, tu, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 0 @@ -87,9 +87,9 @@ define 
@insertelt_nxv4f16_imm( %v, half %elt) { ; CHECK-LABEL: insertelt_nxv4f16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 4, e16, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m1, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 @@ -99,10 +99,10 @@ define @insertelt_nxv4f16_idx( %v, half %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv4f16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx @@ -112,7 +112,7 @@ define @insertelt_nxv8f16_0( %v, half %elt) { ; CHECK-LABEL: insertelt_nxv8f16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, tu, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 0 @@ -122,9 +122,9 @@ define @insertelt_nxv8f16_imm( %v, half %elt) { ; CHECK-LABEL: insertelt_nxv8f16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: vsetivli zero, 4, e16, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 @@ -134,10 +134,10 @@ define @insertelt_nxv8f16_idx( %v, half %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv8f16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vfmv.s.f v10, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx @@ -147,7 +147,7 @@ define @insertelt_nxv16f16_0( %v, half %elt) { ; CHECK-LABEL: insertelt_nxv16f16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, tu, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 0 @@ -157,9 +157,9 @@ define @insertelt_nxv16f16_imm( %v, half %elt) { ; CHECK-LABEL: insertelt_nxv16f16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfmv.s.f v12, fa0 -; CHECK-NEXT: vsetivli zero, 4, e16, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 @@ -169,10 +169,10 @@ define @insertelt_nxv16f16_idx( %v, half %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv16f16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vfmv.s.f v12, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx @@ -182,7 +182,7 @@ define @insertelt_nxv32f16_0( %v, half %elt) { ; CHECK-LABEL: insertelt_nxv32f16_0: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetvli a0, zero, e16, m8, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, tu, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 0 @@ -192,9 +192,9 @@ define @insertelt_nxv32f16_imm( %v, half %elt) { ; CHECK-LABEL: insertelt_nxv32f16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfmv.s.f v16, fa0 -; CHECK-NEXT: vsetivli zero, 4, e16, m8, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m8, tu, ma ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 3 @@ -204,10 +204,10 @@ define @insertelt_nxv32f16_idx( %v, half %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv32f16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vfmv.s.f v16, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, ma ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %r = insertelement %v, half %elt, i32 %idx @@ -217,7 +217,7 @@ define @insertelt_nxv1f32_0( %v, float %elt) { ; CHECK-LABEL: insertelt_nxv1f32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, tu, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 0 @@ -227,9 +227,9 @@ define @insertelt_nxv1f32_imm( %v, float %elt) { ; CHECK-LABEL: insertelt_nxv1f32_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 4, e32, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e32, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 3 @@ -239,10 +239,10 @@ define @insertelt_nxv1f32_idx( %v, float %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv1f32_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 %idx @@ -252,7 +252,7 @@ define @insertelt_nxv2f32_0( %v, float %elt) { ; CHECK-LABEL: insertelt_nxv2f32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, tu, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 0 @@ -262,9 +262,9 @@ define @insertelt_nxv2f32_imm( %v, float %elt) { ; CHECK-LABEL: insertelt_nxv2f32_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 3 @@ -274,10 +274,10 @@ define @insertelt_nxv2f32_idx( %v, float %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv2f32_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, 
ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 %idx @@ -287,7 +287,7 @@ define @insertelt_nxv4f32_0( %v, float %elt) { ; CHECK-LABEL: insertelt_nxv4f32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, tu, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 0 @@ -297,9 +297,9 @@ define @insertelt_nxv4f32_imm( %v, float %elt) { ; CHECK-LABEL: insertelt_nxv4f32_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: vsetivli zero, 4, e32, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 3 @@ -309,10 +309,10 @@ define @insertelt_nxv4f32_idx( %v, float %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv4f32_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vfmv.s.f v10, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 %idx @@ -322,7 +322,7 @@ define @insertelt_nxv8f32_0( %v, float %elt) { ; CHECK-LABEL: insertelt_nxv8f32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, tu, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 0 @@ -332,9 +332,9 @@ define @insertelt_nxv8f32_imm( %v, float %elt) { ; CHECK-LABEL: insertelt_nxv8f32_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfmv.s.f v12, fa0 -; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 3 @@ -344,10 +344,10 @@ define @insertelt_nxv8f32_idx( %v, float %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv8f32_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vfmv.s.f v12, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 %idx @@ -357,7 +357,7 @@ define @insertelt_nxv16f32_0( %v, float %elt) { ; CHECK-LABEL: insertelt_nxv16f32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, tu, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 0 @@ -367,9 +367,9 @@ define @insertelt_nxv16f32_imm( %v, float %elt) { ; CHECK-LABEL: insertelt_nxv16f32_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfmv.s.f v16, fa0 -; CHECK-NEXT: vsetivli zero, 4, e32, m8, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m8, tu, ma ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 3 @@ -379,10 +379,10 @@ define @insertelt_nxv16f32_idx( %v, float %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv16f32_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli 
a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vfmv.s.f v16, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, ma ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %r = insertelement %v, float %elt, i32 %idx @@ -392,7 +392,7 @@ define @insertelt_nxv1f64_0( %v, double %elt) { ; CHECK-LABEL: insertelt_nxv1f64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, tu, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 0 @@ -402,9 +402,9 @@ define @insertelt_nxv1f64_imm( %v, double %elt) { ; CHECK-LABEL: insertelt_nxv1f64_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetivli zero, 4, e64, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m1, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 3 @@ -414,10 +414,10 @@ define @insertelt_nxv1f64_idx( %v, double %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv1f64_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 %idx @@ -427,7 +427,7 @@ define @insertelt_nxv2f64_0( %v, double %elt) { ; CHECK-LABEL: insertelt_nxv2f64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, tu, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 0 @@ -437,9 +437,9 @@ define @insertelt_nxv2f64_imm( %v, double %elt) { ; CHECK-LABEL: insertelt_nxv2f64_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 3 @@ -449,10 +449,10 @@ define @insertelt_nxv2f64_idx( %v, double %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv2f64_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; CHECK-NEXT: vfmv.s.f v10, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, ma ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 %idx @@ -462,7 +462,7 @@ define @insertelt_nxv4f64_0( %v, double %elt) { ; CHECK-LABEL: insertelt_nxv4f64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, tu, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 0 @@ -472,9 +472,9 @@ define @insertelt_nxv4f64_imm( %v, double %elt) { ; CHECK-LABEL: insertelt_nxv4f64_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfmv.s.f v12, fa0 -; CHECK-NEXT: vsetivli zero, 4, e64, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m4, tu, ma ; CHECK-NEXT: vslideup.vi 
v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 3 @@ -484,10 +484,10 @@ define @insertelt_nxv4f64_idx( %v, double %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv4f64_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; CHECK-NEXT: vfmv.s.f v12, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, ma ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 %idx @@ -497,7 +497,7 @@ define @insertelt_nxv8f64_0( %v, double %elt) { ; CHECK-LABEL: insertelt_nxv8f64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, tu, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 0 @@ -507,9 +507,9 @@ define @insertelt_nxv8f64_imm( %v, double %elt) { ; CHECK-LABEL: insertelt_nxv8f64_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfmv.s.f v16, fa0 -; CHECK-NEXT: vsetivli zero, 4, e64, m8, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m8, tu, ma ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 3 @@ -519,10 +519,10 @@ define @insertelt_nxv8f64_idx( %v, double %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv8f64_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; CHECK-NEXT: vfmv.s.f v16, fa0 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, ma ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %r = insertelement %v, double %elt, i32 %idx diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-i1.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-i1.ll --- a/llvm/test/CodeGen/RISCV/rvv/insertelt-i1.ll +++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-i1.ll @@ -5,13 +5,13 @@ define @insertelt_nxv1i1( %x, i1 %elt) { ; CHECK-LABEL: insertelt_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 -; CHECK-NEXT: vsetivli zero, 3, e8, mf8, tu, mu +; CHECK-NEXT: vsetivli zero, 3, e8, mf8, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 2 -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -22,14 +22,14 @@ define @insertelt_idx_nxv1i1( %x, i1 %elt, i64 %idx) { ; CHECK-LABEL: insertelt_idx_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vslideup.vx v9, v8, a1 -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -40,13 +40,13 @@ define @insertelt_nxv2i1( %x, i1 %elt) { ; CHECK-LABEL: insertelt_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; 
CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 -; CHECK-NEXT: vsetivli zero, 3, e8, mf4, tu, mu +; CHECK-NEXT: vsetivli zero, 3, e8, mf4, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 2 -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -57,14 +57,14 @@ define @insertelt_idx_nxv2i1( %x, i1 %elt, i64 %idx) { ; CHECK-LABEL: insertelt_idx_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vslideup.vx v9, v8, a1 -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -75,13 +75,13 @@ define @insertelt_nxv4i1( %x, i1 %elt) { ; CHECK-LABEL: insertelt_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 -; CHECK-NEXT: vsetivli zero, 3, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 3, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 2 -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -92,14 +92,14 @@ define @insertelt_idx_nxv4i1( %x, i1 %elt, i64 %idx) { ; CHECK-LABEL: insertelt_idx_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vx v9, v8, a1 -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -110,13 +110,13 @@ define @insertelt_nxv8i1( %x, i1 %elt) { ; CHECK-LABEL: insertelt_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 -; CHECK-NEXT: vsetivli zero, 3, e8, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 3, e8, m1, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 2 -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -127,14 +127,14 @@ define @insertelt_idx_nxv8i1( %x, i1 %elt, i64 %idx) { ; CHECK-LABEL: insertelt_idx_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v9, v9, 1, v0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vslideup.vx v9, v8, a1 -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, 
e8, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -145,13 +145,13 @@ define @insertelt_nxv16i1( %x, i1 %elt) { ; CHECK-LABEL: insertelt_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: vmerge.vim v10, v10, 1, v0 -; CHECK-NEXT: vsetivli zero, 3, e8, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 3, e8, m2, tu, ma ; CHECK-NEXT: vslideup.vi v10, v8, 2 -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v10, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -162,14 +162,14 @@ define @insertelt_idx_nxv16i1( %x, i1 %elt, i64 %idx) { ; CHECK-LABEL: insertelt_idx_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, m2, ta, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: vmerge.vim v10, v10, 1, v0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vslideup.vx v10, v8, a1 -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v10, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -180,13 +180,13 @@ define @insertelt_nxv32i1( %x, i1 %elt) { ; CHECK-LABEL: insertelt_nxv32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v12, 0 ; CHECK-NEXT: vmerge.vim v12, v12, 1, v0 -; CHECK-NEXT: vsetivli zero, 3, e8, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 3, e8, m4, tu, ma ; CHECK-NEXT: vslideup.vi v12, v8, 2 -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v12, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -197,14 +197,14 @@ define @insertelt_idx_nxv32i1( %x, i1 %elt, i64 %idx) { ; CHECK-LABEL: insertelt_idx_nxv32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, m4, ta, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v12, 0 ; CHECK-NEXT: vmerge.vim v12, v12, 1, v0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vslideup.vx v12, v8, a1 -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v12, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -215,13 +215,13 @@ define @insertelt_nxv64i1( %x, i1 %elt) { ; CHECK-LABEL: insertelt_nxv64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v16, 0 ; CHECK-NEXT: vmerge.vim v16, v16, 1, v0 -; CHECK-NEXT: vsetivli zero, 3, e8, m8, tu, mu +; CHECK-NEXT: vsetivli zero, 3, e8, m8, tu, ma ; CHECK-NEXT: vslideup.vi v16, v8, 2 -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vand.vi v8, v16, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -232,14 +232,14 @@ define @insertelt_idx_nxv64i1( %x, i1 %elt, i64 %idx) { ; CHECK-LABEL: insertelt_idx_nxv64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma ; 
CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: vmv.v.i v16, 0 ; CHECK-NEXT: vmerge.vim v16, v16, 1, v0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma ; CHECK-NEXT: vslideup.vx v16, v8, a1 -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vand.vi v8, v16, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll @@ -5,7 +5,7 @@ define @insertelt_nxv1i8_0( %v, i8 signext %elt) { ; CHECK-LABEL: insertelt_nxv1i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 0 @@ -15,9 +15,9 @@ define @insertelt_nxv1i8_imm( %v, i8 signext %elt) { ; CHECK-LABEL: insertelt_nxv1i8_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf8, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf8, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -27,10 +27,10 @@ define @insertelt_nxv1i8_idx( %v, i8 signext %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv1i8_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -40,7 +40,7 @@ define @insertelt_nxv2i8_0( %v, i8 signext %elt) { ; CHECK-LABEL: insertelt_nxv2i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 0 @@ -50,9 +50,9 @@ define @insertelt_nxv2i8_imm( %v, i8 signext %elt) { ; CHECK-LABEL: insertelt_nxv2i8_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -62,10 +62,10 @@ define @insertelt_nxv2i8_idx( %v, i8 signext %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv2i8_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -75,7 +75,7 @@ define @insertelt_nxv4i8_0( %v, i8 signext %elt) { ; CHECK-LABEL: insertelt_nxv4i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 0 @@ -85,9 +85,9 @@ define @insertelt_nxv4i8_imm( %v, i8 signext %elt) { ; CHECK-LABEL: 
insertelt_nxv4i8_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -97,10 +97,10 @@ define @insertelt_nxv4i8_idx( %v, i8 signext %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv4i8_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -110,7 +110,7 @@ define @insertelt_nxv8i8_0( %v, i8 signext %elt) { ; CHECK-LABEL: insertelt_nxv8i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 0 @@ -120,9 +120,9 @@ define @insertelt_nxv8i8_imm( %v, i8 signext %elt) { ; CHECK-LABEL: insertelt_nxv8i8_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, m1, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -132,10 +132,10 @@ define @insertelt_nxv8i8_idx( %v, i8 signext %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv8i8_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -145,7 +145,7 @@ define @insertelt_nxv16i8_0( %v, i8 signext %elt) { ; CHECK-LABEL: insertelt_nxv16i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 0 @@ -155,9 +155,9 @@ define @insertelt_nxv16i8_imm( %v, i8 signext %elt) { ; CHECK-LABEL: insertelt_nxv16i8_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, m2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -167,10 +167,10 @@ define @insertelt_nxv16i8_idx( %v, i8 signext %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv16i8_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, m2, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vslideup.vx v8, v10, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -180,7 +180,7 @@ define @insertelt_nxv32i8_0( %v, i8 signext %elt) { ; CHECK-LABEL: insertelt_nxv32i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e8, 
m4, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 0 @@ -190,9 +190,9 @@ define @insertelt_nxv32i8_imm( %v, i8 signext %elt) { ; CHECK-LABEL: insertelt_nxv32i8_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vmv.s.x v12, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, m4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -202,10 +202,10 @@ define @insertelt_nxv32i8_idx( %v, i8 signext %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv32i8_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, m4, ta, ma ; CHECK-NEXT: vmv.s.x v12, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vslideup.vx v8, v12, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -215,7 +215,7 @@ define @insertelt_nxv64i8_0( %v, i8 signext %elt) { ; CHECK-LABEL: insertelt_nxv64i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m8, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 0 @@ -225,9 +225,9 @@ define @insertelt_nxv64i8_imm( %v, i8 signext %elt) { ; CHECK-LABEL: insertelt_nxv64i8_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vmv.s.x v16, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, m8, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, m8, tu, ma ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -237,10 +237,10 @@ define @insertelt_nxv64i8_idx( %v, i8 signext %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv64i8_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma ; CHECK-NEXT: vmv.s.x v16, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma ; CHECK-NEXT: vslideup.vx v8, v16, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -250,7 +250,7 @@ define @insertelt_nxv1i16_0( %v, i16 signext %elt) { ; CHECK-LABEL: insertelt_nxv1i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 0 @@ -260,9 +260,9 @@ define @insertelt_nxv1i16_imm( %v, i16 signext %elt) { ; CHECK-LABEL: insertelt_nxv1i16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, mf4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 @@ -272,10 +272,10 @@ define @insertelt_nxv1i16_idx( %v, i16 signext %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv1i16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, 
i32 %idx @@ -285,7 +285,7 @@ define @insertelt_nxv2i16_0( %v, i16 signext %elt) { ; CHECK-LABEL: insertelt_nxv2i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 0 @@ -295,9 +295,9 @@ define @insertelt_nxv2i16_imm( %v, i16 signext %elt) { ; CHECK-LABEL: insertelt_nxv2i16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 @@ -307,10 +307,10 @@ define @insertelt_nxv2i16_idx( %v, i16 signext %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv2i16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx @@ -320,7 +320,7 @@ define @insertelt_nxv4i16_0( %v, i16 signext %elt) { ; CHECK-LABEL: insertelt_nxv4i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 0 @@ -330,9 +330,9 @@ define @insertelt_nxv4i16_imm( %v, i16 signext %elt) { ; CHECK-LABEL: insertelt_nxv4i16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m1, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 @@ -342,10 +342,10 @@ define @insertelt_nxv4i16_idx( %v, i16 signext %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv4i16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx @@ -355,7 +355,7 @@ define @insertelt_nxv8i16_0( %v, i16 signext %elt) { ; CHECK-LABEL: insertelt_nxv8i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 0 @@ -365,9 +365,9 @@ define @insertelt_nxv8i16_imm( %v, i16 signext %elt) { ; CHECK-LABEL: insertelt_nxv8i16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 @@ -377,10 +377,10 @@ define @insertelt_nxv8i16_idx( %v, i16 signext %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv8i16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, ma 
; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vslideup.vx v8, v10, a1 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx @@ -390,7 +390,7 @@ define @insertelt_nxv16i16_0( %v, i16 signext %elt) { ; CHECK-LABEL: insertelt_nxv16i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 0 @@ -400,9 +400,9 @@ define @insertelt_nxv16i16_imm( %v, i16 signext %elt) { ; CHECK-LABEL: insertelt_nxv16i16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vmv.s.x v12, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 @@ -412,10 +412,10 @@ define @insertelt_nxv16i16_idx( %v, i16 signext %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv16i16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e16, m4, ta, ma ; CHECK-NEXT: vmv.s.x v12, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vslideup.vx v8, v12, a1 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx @@ -425,7 +425,7 @@ define @insertelt_nxv32i16_0( %v, i16 signext %elt) { ; CHECK-LABEL: insertelt_nxv32i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m8, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 0 @@ -435,9 +435,9 @@ define @insertelt_nxv32i16_imm( %v, i16 signext %elt) { ; CHECK-LABEL: insertelt_nxv32i16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vmv.s.x v16, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, m8, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m8, tu, ma ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 @@ -447,10 +447,10 @@ define @insertelt_nxv32i16_idx( %v, i16 signext %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv32i16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e16, m8, ta, ma ; CHECK-NEXT: vmv.s.x v16, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vslideup.vx v8, v16, a1 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx @@ -460,7 +460,7 @@ define @insertelt_nxv1i32_0( %v, i32 %elt) { ; CHECK-LABEL: insertelt_nxv1i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 0 @@ -470,9 +470,9 @@ define @insertelt_nxv1i32_imm( %v, i32 %elt) { ; CHECK-LABEL: insertelt_nxv1i32_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e32, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e32, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 @@ 
-482,10 +482,10 @@ define @insertelt_nxv1i32_idx( %v, i32 %elt, i32 %idx) { ; CHECK-LABEL: insertelt_nxv1i32_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx @@ -495,7 +495,7 @@ define @insertelt_nxv2i32_0( %v, i32 %elt) { ; CHECK-LABEL: insertelt_nxv2i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 0 @@ -505,9 +505,9 @@ define @insertelt_nxv2i32_imm( %v, i32 %elt) { ; CHECK-LABEL: insertelt_nxv2i32_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 @@ -517,10 +517,10 @@ define @insertelt_nxv2i32_idx( %v, i32 %elt, i32 %idx) { ; CHECK-LABEL: insertelt_nxv2i32_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx @@ -530,7 +530,7 @@ define @insertelt_nxv4i32_0( %v, i32 %elt) { ; CHECK-LABEL: insertelt_nxv4i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 0 @@ -540,9 +540,9 @@ define @insertelt_nxv4i32_imm( %v, i32 %elt) { ; CHECK-LABEL: insertelt_nxv4i32_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetivli zero, 4, e32, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 @@ -552,10 +552,10 @@ define @insertelt_nxv4i32_idx( %v, i32 %elt, i32 %idx) { ; CHECK-LABEL: insertelt_nxv4i32_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e32, m2, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vslideup.vx v8, v10, a1 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx @@ -565,7 +565,7 @@ define @insertelt_nxv8i32_0( %v, i32 %elt) { ; CHECK-LABEL: insertelt_nxv8i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 0 @@ -575,9 +575,9 @@ define @insertelt_nxv8i32_imm( %v, i32 %elt) { ; CHECK-LABEL: insertelt_nxv8i32_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vmv.s.x v12, a0 -; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, mu 
+; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 @@ -587,10 +587,10 @@ define @insertelt_nxv8i32_idx( %v, i32 %elt, i32 %idx) { ; CHECK-LABEL: insertelt_nxv8i32_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; CHECK-NEXT: vmv.s.x v12, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vslideup.vx v8, v12, a1 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx @@ -600,7 +600,7 @@ define @insertelt_nxv16i32_0( %v, i32 %elt) { ; CHECK-LABEL: insertelt_nxv16i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m8, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 0 @@ -610,9 +610,9 @@ define @insertelt_nxv16i32_imm( %v, i32 %elt) { ; CHECK-LABEL: insertelt_nxv16i32_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vmv.s.x v16, a0 -; CHECK-NEXT: vsetivli zero, 4, e32, m8, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m8, tu, ma ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 @@ -622,10 +622,10 @@ define @insertelt_nxv16i32_idx( %v, i32 %elt, i32 %idx) { ; CHECK-LABEL: insertelt_nxv16i32_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e32, m8, ta, ma ; CHECK-NEXT: vmv.s.x v16, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vslideup.vx v8, v16, a1 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx @@ -635,11 +635,11 @@ define @insertelt_nxv1i64_0( %v, i64 %elt) { ; CHECK-LABEL: insertelt_nxv1i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vslide1up.vx v10, v9, a1 ; CHECK-NEXT: vslide1up.vx v9, v10, a0 -; CHECK-NEXT: vsetivli zero, 1, e64, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 0 @@ -649,11 +649,11 @@ define @insertelt_nxv1i64_imm( %v, i64 %elt) { ; CHECK-LABEL: insertelt_nxv1i64_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vslide1up.vx v10, v9, a1 ; CHECK-NEXT: vslide1up.vx v9, v10, a0 -; CHECK-NEXT: vsetivli zero, 4, e64, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m1, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 3 @@ -663,12 +663,12 @@ define @insertelt_nxv1i64_idx( %v, i64 %elt, i32 %idx) { ; CHECK-LABEL: insertelt_nxv1i64_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vslide1up.vx v10, v9, a1 ; CHECK-NEXT: vslide1up.vx v9, v10, a0 ; CHECK-NEXT: addi a0, a2, 1 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a2 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 %idx @@ -678,11 +678,11 @@ define @insertelt_nxv2i64_0( %v, i64 %elt) { ; CHECK-LABEL: 
insertelt_nxv2i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, m2, ta, ma ; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: vslide1up.vx v12, v10, a1 ; CHECK-NEXT: vslide1up.vx v10, v12, a0 -; CHECK-NEXT: vsetivli zero, 1, e64, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 0 @@ -692,11 +692,11 @@ define @insertelt_nxv2i64_imm( %v, i64 %elt) { ; CHECK-LABEL: insertelt_nxv2i64_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, m2, ta, ma ; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: vslide1up.vx v12, v10, a1 ; CHECK-NEXT: vslide1up.vx v10, v12, a0 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 3 @@ -706,12 +706,12 @@ define @insertelt_nxv2i64_idx( %v, i64 %elt, i32 %idx) { ; CHECK-LABEL: insertelt_nxv2i64_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, m2, ta, ma ; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: vslide1up.vx v12, v10, a1 ; CHECK-NEXT: vslide1up.vx v10, v12, a0 ; CHECK-NEXT: addi a0, a2, 1 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vslideup.vx v8, v10, a2 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 %idx @@ -721,11 +721,11 @@ define @insertelt_nxv4i64_0( %v, i64 %elt) { ; CHECK-LABEL: insertelt_nxv4i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, m4, ta, ma ; CHECK-NEXT: vmv.v.i v12, 0 ; CHECK-NEXT: vslide1up.vx v16, v12, a1 ; CHECK-NEXT: vslide1up.vx v12, v16, a0 -; CHECK-NEXT: vsetivli zero, 1, e64, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v12, 0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 0 @@ -735,11 +735,11 @@ define @insertelt_nxv4i64_imm( %v, i64 %elt) { ; CHECK-LABEL: insertelt_nxv4i64_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, m4, ta, ma ; CHECK-NEXT: vmv.v.i v12, 0 ; CHECK-NEXT: vslide1up.vx v16, v12, a1 ; CHECK-NEXT: vslide1up.vx v12, v16, a0 -; CHECK-NEXT: vsetivli zero, 4, e64, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 3 @@ -749,12 +749,12 @@ define @insertelt_nxv4i64_idx( %v, i64 %elt, i32 %idx) { ; CHECK-LABEL: insertelt_nxv4i64_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, m4, ta, ma ; CHECK-NEXT: vmv.v.i v12, 0 ; CHECK-NEXT: vslide1up.vx v16, v12, a1 ; CHECK-NEXT: vslide1up.vx v12, v16, a0 ; CHECK-NEXT: addi a0, a2, 1 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vslideup.vx v8, v12, a2 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 %idx @@ -764,11 +764,11 @@ define @insertelt_nxv8i64_0( %v, i64 %elt) { ; CHECK-LABEL: insertelt_nxv8i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, m8, ta, ma ; CHECK-NEXT: vmv.v.i v16, 0 ; CHECK-NEXT: vslide1up.vx v24, v16, a1 ; CHECK-NEXT: vslide1up.vx v16, v24, a0 -; CHECK-NEXT: vsetivli zero, 1, e64, m8, tu, mu +; CHECK-NEXT: vsetivli zero, 1, e64, 
m8, tu, ma ; CHECK-NEXT: vslideup.vi v8, v16, 0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 0 @@ -778,11 +778,11 @@ define @insertelt_nxv8i64_imm( %v, i64 %elt) { ; CHECK-LABEL: insertelt_nxv8i64_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, m8, ta, ma ; CHECK-NEXT: vmv.v.i v16, 0 ; CHECK-NEXT: vslide1up.vx v24, v16, a1 ; CHECK-NEXT: vslide1up.vx v16, v24, a0 -; CHECK-NEXT: vsetivli zero, 4, e64, m8, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m8, tu, ma ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 3 @@ -792,12 +792,12 @@ define @insertelt_nxv8i64_idx( %v, i64 %elt, i32 %idx) { ; CHECK-LABEL: insertelt_nxv8i64_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 2, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, m8, ta, ma ; CHECK-NEXT: vmv.v.i v16, 0 ; CHECK-NEXT: vslide1up.vx v24, v16, a1 ; CHECK-NEXT: vslide1up.vx v16, v24, a0 ; CHECK-NEXT: addi a0, a2, 1 -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vslideup.vx v8, v16, a2 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 %idx @@ -809,7 +809,7 @@ ; CHECK-LABEL: insertelt_nxv2i64_0_c10: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 10 -; CHECK-NEXT: vsetvli a1, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i64 10, i32 0 @@ -820,9 +820,9 @@ ; CHECK-LABEL: insertelt_nxv2i64_imm_c10: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 10 -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, i64 10, i32 3 @@ -833,10 +833,10 @@ ; CHECK-LABEL: insertelt_nxv2i64_idx_c10: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 10 -; CHECK-NEXT: vsetvli a2, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e64, m2, ta, ma ; CHECK-NEXT: vmv.s.x v10, a1 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, ma ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %r = insertelement %v, i64 10, i32 %idx @@ -847,7 +847,7 @@ ; CHECK-LABEL: insertelt_nxv2i64_0_cn1: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i64 -1, i32 0 @@ -858,9 +858,9 @@ ; CHECK-LABEL: insertelt_nxv2i64_imm_cn1: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, i64 -1, i32 3 @@ -871,10 +871,10 @@ ; CHECK-LABEL: insertelt_nxv2i64_idx_cn1: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, -1 -; CHECK-NEXT: vsetvli a2, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e64, m2, ta, ma ; CHECK-NEXT: vmv.s.x v10, a1 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, ma ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %r = insertelement %v, i64 -1, i32 %idx diff --git 
a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll @@ -5,7 +5,7 @@ define @insertelt_nxv1i8_0( %v, i8 signext %elt) { ; CHECK-LABEL: insertelt_nxv1i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 0 @@ -15,9 +15,9 @@ define @insertelt_nxv1i8_imm( %v, i8 signext %elt) { ; CHECK-LABEL: insertelt_nxv1i8_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf8, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf8, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -27,10 +27,10 @@ define @insertelt_nxv1i8_idx( %v, i8 signext %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv1i8_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -40,7 +40,7 @@ define @insertelt_nxv2i8_0( %v, i8 signext %elt) { ; CHECK-LABEL: insertelt_nxv2i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 0 @@ -50,9 +50,9 @@ define @insertelt_nxv2i8_imm( %v, i8 signext %elt) { ; CHECK-LABEL: insertelt_nxv2i8_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -62,10 +62,10 @@ define @insertelt_nxv2i8_idx( %v, i8 signext %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv2i8_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -75,7 +75,7 @@ define @insertelt_nxv4i8_0( %v, i8 signext %elt) { ; CHECK-LABEL: insertelt_nxv4i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 0 @@ -85,9 +85,9 @@ define @insertelt_nxv4i8_imm( %v, i8 signext %elt) { ; CHECK-LABEL: insertelt_nxv4i8_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -97,10 +97,10 @@ define @insertelt_nxv4i8_idx( %v, i8 signext %elt, i32 signext %idx) { ; 
CHECK-LABEL: insertelt_nxv4i8_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -110,7 +110,7 @@ define @insertelt_nxv8i8_0( %v, i8 signext %elt) { ; CHECK-LABEL: insertelt_nxv8i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 0 @@ -120,9 +120,9 @@ define @insertelt_nxv8i8_imm( %v, i8 signext %elt) { ; CHECK-LABEL: insertelt_nxv8i8_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, m1, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -132,10 +132,10 @@ define @insertelt_nxv8i8_idx( %v, i8 signext %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv8i8_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -145,7 +145,7 @@ define @insertelt_nxv16i8_0( %v, i8 signext %elt) { ; CHECK-LABEL: insertelt_nxv16i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 0 @@ -155,9 +155,9 @@ define @insertelt_nxv16i8_imm( %v, i8 signext %elt) { ; CHECK-LABEL: insertelt_nxv16i8_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, m2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -167,10 +167,10 @@ define @insertelt_nxv16i8_idx( %v, i8 signext %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv16i8_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, m2, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vslideup.vx v8, v10, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -180,7 +180,7 @@ define @insertelt_nxv32i8_0( %v, i8 signext %elt) { ; CHECK-LABEL: insertelt_nxv32i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 0 @@ -190,9 +190,9 @@ define @insertelt_nxv32i8_imm( %v, i8 signext %elt) { ; CHECK-LABEL: insertelt_nxv32i8_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vmv.s.x v12, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, m4, tu, ma ; 
CHECK-NEXT: vslideup.vi v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -202,10 +202,10 @@ define @insertelt_nxv32i8_idx( %v, i8 signext %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv32i8_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, m4, ta, ma ; CHECK-NEXT: vmv.s.x v12, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vslideup.vx v8, v12, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -215,7 +215,7 @@ define @insertelt_nxv64i8_0( %v, i8 signext %elt) { ; CHECK-LABEL: insertelt_nxv64i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m8, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 0 @@ -225,9 +225,9 @@ define @insertelt_nxv64i8_imm( %v, i8 signext %elt) { ; CHECK-LABEL: insertelt_nxv64i8_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vmv.s.x v16, a0 -; CHECK-NEXT: vsetivli zero, 4, e8, m8, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, m8, tu, ma ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 3 @@ -237,10 +237,10 @@ define @insertelt_nxv64i8_idx( %v, i8 signext %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv64i8_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma ; CHECK-NEXT: vmv.s.x v16, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma ; CHECK-NEXT: vslideup.vx v8, v16, a1 ; CHECK-NEXT: ret %r = insertelement %v, i8 %elt, i32 %idx @@ -250,7 +250,7 @@ define @insertelt_nxv1i16_0( %v, i16 signext %elt) { ; CHECK-LABEL: insertelt_nxv1i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 0 @@ -260,9 +260,9 @@ define @insertelt_nxv1i16_imm( %v, i16 signext %elt) { ; CHECK-LABEL: insertelt_nxv1i16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, mf4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 @@ -272,10 +272,10 @@ define @insertelt_nxv1i16_idx( %v, i16 signext %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv1i16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx @@ -285,7 +285,7 @@ define @insertelt_nxv2i16_0( %v, i16 signext %elt) { ; CHECK-LABEL: insertelt_nxv2i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 0 @@ -295,9 +295,9 @@ define @insertelt_nxv2i16_imm( %v, i16 signext %elt) { ; CHECK-LABEL: insertelt_nxv2i16_imm: ; CHECK: # %bb.0: 
-; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 @@ -307,10 +307,10 @@ define @insertelt_nxv2i16_idx( %v, i16 signext %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv2i16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx @@ -320,7 +320,7 @@ define @insertelt_nxv4i16_0( %v, i16 signext %elt) { ; CHECK-LABEL: insertelt_nxv4i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 0 @@ -330,9 +330,9 @@ define @insertelt_nxv4i16_imm( %v, i16 signext %elt) { ; CHECK-LABEL: insertelt_nxv4i16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m1, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 @@ -342,10 +342,10 @@ define @insertelt_nxv4i16_idx( %v, i16 signext %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv4i16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx @@ -355,7 +355,7 @@ define @insertelt_nxv8i16_0( %v, i16 signext %elt) { ; CHECK-LABEL: insertelt_nxv8i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 0 @@ -365,9 +365,9 @@ define @insertelt_nxv8i16_imm( %v, i16 signext %elt) { ; CHECK-LABEL: insertelt_nxv8i16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 @@ -377,10 +377,10 @@ define @insertelt_nxv8i16_idx( %v, i16 signext %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv8i16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vslideup.vx v8, v10, a1 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx @@ -390,7 +390,7 @@ define @insertelt_nxv16i16_0( %v, i16 signext %elt) { ; CHECK-LABEL: insertelt_nxv16i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, tu, mu +; CHECK-NEXT: 
vsetvli a1, zero, e16, m4, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 0 @@ -400,9 +400,9 @@ define @insertelt_nxv16i16_imm( %v, i16 signext %elt) { ; CHECK-LABEL: insertelt_nxv16i16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vmv.s.x v12, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 @@ -412,10 +412,10 @@ define @insertelt_nxv16i16_idx( %v, i16 signext %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv16i16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e16, m4, ta, ma ; CHECK-NEXT: vmv.s.x v12, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vslideup.vx v8, v12, a1 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx @@ -425,7 +425,7 @@ define @insertelt_nxv32i16_0( %v, i16 signext %elt) { ; CHECK-LABEL: insertelt_nxv32i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m8, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 0 @@ -435,9 +435,9 @@ define @insertelt_nxv32i16_imm( %v, i16 signext %elt) { ; CHECK-LABEL: insertelt_nxv32i16_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vmv.s.x v16, a0 -; CHECK-NEXT: vsetivli zero, 4, e16, m8, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, m8, tu, ma ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 3 @@ -447,10 +447,10 @@ define @insertelt_nxv32i16_idx( %v, i16 signext %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv32i16_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e16, m8, ta, ma ; CHECK-NEXT: vmv.s.x v16, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vslideup.vx v8, v16, a1 ; CHECK-NEXT: ret %r = insertelement %v, i16 %elt, i32 %idx @@ -460,7 +460,7 @@ define @insertelt_nxv1i32_0( %v, i32 signext %elt) { ; CHECK-LABEL: insertelt_nxv1i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 0 @@ -470,9 +470,9 @@ define @insertelt_nxv1i32_imm( %v, i32 signext %elt) { ; CHECK-LABEL: insertelt_nxv1i32_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e32, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e32, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 @@ -482,10 +482,10 @@ define @insertelt_nxv1i32_idx( %v, i32 signext %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv1i32_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vslideup.vx 
v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx @@ -495,7 +495,7 @@ define @insertelt_nxv2i32_0( %v, i32 signext %elt) { ; CHECK-LABEL: insertelt_nxv2i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 0 @@ -505,9 +505,9 @@ define @insertelt_nxv2i32_imm( %v, i32 signext %elt) { ; CHECK-LABEL: insertelt_nxv2i32_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 @@ -517,10 +517,10 @@ define @insertelt_nxv2i32_idx( %v, i32 signext %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv2i32_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx @@ -530,7 +530,7 @@ define @insertelt_nxv4i32_0( %v, i32 signext %elt) { ; CHECK-LABEL: insertelt_nxv4i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 0 @@ -540,9 +540,9 @@ define @insertelt_nxv4i32_imm( %v, i32 signext %elt) { ; CHECK-LABEL: insertelt_nxv4i32_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetivli zero, 4, e32, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 @@ -552,10 +552,10 @@ define @insertelt_nxv4i32_idx( %v, i32 signext %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv4i32_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e32, m2, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vslideup.vx v8, v10, a1 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx @@ -565,7 +565,7 @@ define @insertelt_nxv8i32_0( %v, i32 signext %elt) { ; CHECK-LABEL: insertelt_nxv8i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 0 @@ -575,9 +575,9 @@ define @insertelt_nxv8i32_imm( %v, i32 signext %elt) { ; CHECK-LABEL: insertelt_nxv8i32_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vmv.s.x v12, a0 -; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 @@ -587,10 +587,10 @@ define @insertelt_nxv8i32_idx( %v, i32 signext %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv8i32_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e32, m4, 
ta, mu +; CHECK-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; CHECK-NEXT: vmv.s.x v12, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vslideup.vx v8, v12, a1 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx @@ -600,7 +600,7 @@ define @insertelt_nxv16i32_0( %v, i32 signext %elt) { ; CHECK-LABEL: insertelt_nxv16i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m8, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 0 @@ -610,9 +610,9 @@ define @insertelt_nxv16i32_imm( %v, i32 signext %elt) { ; CHECK-LABEL: insertelt_nxv16i32_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vmv.s.x v16, a0 -; CHECK-NEXT: vsetivli zero, 4, e32, m8, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m8, tu, ma ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 3 @@ -622,10 +622,10 @@ define @insertelt_nxv16i32_idx( %v, i32 signext %elt, i32 signext %idx) { ; CHECK-LABEL: insertelt_nxv16i32_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e32, m8, ta, ma ; CHECK-NEXT: vmv.s.x v16, a0 ; CHECK-NEXT: addi a0, a1, 1 -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vslideup.vx v8, v16, a1 ; CHECK-NEXT: ret %r = insertelement %v, i32 %elt, i32 %idx @@ -635,7 +635,7 @@ define @insertelt_nxv1i64_0( %v, i64 %elt) { ; CHECK-LABEL: insertelt_nxv1i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 0 @@ -645,9 +645,9 @@ define @insertelt_nxv1i64_imm( %v, i64 %elt) { ; CHECK-LABEL: insertelt_nxv1i64_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetivli zero, 4, e64, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m1, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 3 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 3 @@ -657,11 +657,11 @@ define @insertelt_nxv1i64_idx( %v, i64 %elt, i32 %idx) { ; CHECK-LABEL: insertelt_nxv1i64_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e64, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 ; CHECK-NEXT: sext.w a0, a1 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 %idx @@ -671,7 +671,7 @@ define @insertelt_nxv2i64_0( %v, i64 %elt) { ; CHECK-LABEL: insertelt_nxv2i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 0 @@ -681,9 +681,9 @@ define @insertelt_nxv2i64_imm( %v, i64 %elt) { ; CHECK-LABEL: insertelt_nxv2i64_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 3 ; CHECK-NEXT: ret %r = 
insertelement %v, i64 %elt, i32 3 @@ -693,11 +693,11 @@ define @insertelt_nxv2i64_idx( %v, i64 %elt, i32 %idx) { ; CHECK-LABEL: insertelt_nxv2i64_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e64, m2, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 ; CHECK-NEXT: sext.w a0, a1 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, ma ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 %idx @@ -707,7 +707,7 @@ define @insertelt_nxv4i64_0( %v, i64 %elt) { ; CHECK-LABEL: insertelt_nxv4i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m4, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 0 @@ -717,9 +717,9 @@ define @insertelt_nxv4i64_imm( %v, i64 %elt) { ; CHECK-LABEL: insertelt_nxv4i64_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; CHECK-NEXT: vmv.s.x v12, a0 -; CHECK-NEXT: vsetivli zero, 4, e64, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v12, 3 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 3 @@ -729,11 +729,11 @@ define @insertelt_nxv4i64_idx( %v, i64 %elt, i32 %idx) { ; CHECK-LABEL: insertelt_nxv4i64_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e64, m4, ta, ma ; CHECK-NEXT: vmv.s.x v12, a0 ; CHECK-NEXT: sext.w a0, a1 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, ma ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 %idx @@ -743,7 +743,7 @@ define @insertelt_nxv8i64_0( %v, i64 %elt) { ; CHECK-LABEL: insertelt_nxv8i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 0 @@ -753,9 +753,9 @@ define @insertelt_nxv8i64_imm( %v, i64 %elt) { ; CHECK-LABEL: insertelt_nxv8i64_imm: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; CHECK-NEXT: vmv.s.x v16, a0 -; CHECK-NEXT: vsetivli zero, 4, e64, m8, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m8, tu, ma ; CHECK-NEXT: vslideup.vi v8, v16, 3 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 3 @@ -765,11 +765,11 @@ define @insertelt_nxv8i64_idx( %v, i64 %elt, i32 %idx) { ; CHECK-LABEL: insertelt_nxv8i64_idx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; CHECK-NEXT: vmv.s.x v16, a0 ; CHECK-NEXT: sext.w a0, a1 ; CHECK-NEXT: addi a1, a0, 1 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, ma ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %r = insertelement %v, i64 %elt, i32 %idx diff --git a/llvm/test/CodeGen/RISCV/rvv/interleave-crash.ll b/llvm/test/CodeGen/RISCV/rvv/interleave-crash.ll --- a/llvm/test/CodeGen/RISCV/rvv/interleave-crash.ll +++ b/llvm/test/CodeGen/RISCV/rvv/interleave-crash.ll @@ -6,30 +6,30 @@ ; RV64-1024-LABEL: interleave256: ; RV64-1024: # %bb.0: # %entry ; RV64-1024-NEXT: li a3, 128 -; RV64-1024-NEXT: vsetvli zero, a3, e16, m2, ta, mu +; RV64-1024-NEXT: vsetvli zero, a3, e16, m2, ta, ma ; RV64-1024-NEXT: vle16.v v8, (a1) ; 
RV64-1024-NEXT: vle16.v v10, (a2) ; RV64-1024-NEXT: li a1, 256 -; RV64-1024-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV64-1024-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; RV64-1024-NEXT: vwaddu.vv v12, v8, v10 ; RV64-1024-NEXT: li a2, -1 ; RV64-1024-NEXT: vwmaccu.vx v12, a2, v10 -; RV64-1024-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; RV64-1024-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; RV64-1024-NEXT: vse16.v v12, (a0) ; RV64-1024-NEXT: ret ; ; RV64-2048-LABEL: interleave256: ; RV64-2048: # %bb.0: # %entry ; RV64-2048-NEXT: li a3, 128 -; RV64-2048-NEXT: vsetvli zero, a3, e16, m1, ta, mu +; RV64-2048-NEXT: vsetvli zero, a3, e16, m1, ta, ma ; RV64-2048-NEXT: vle16.v v8, (a1) ; RV64-2048-NEXT: vle16.v v9, (a2) ; RV64-2048-NEXT: li a1, 256 -; RV64-2048-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; RV64-2048-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; RV64-2048-NEXT: vwaddu.vv v10, v8, v9 ; RV64-2048-NEXT: li a2, -1 ; RV64-2048-NEXT: vwmaccu.vx v10, a2, v9 -; RV64-2048-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV64-2048-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; RV64-2048-NEXT: vse16.v v10, (a0) ; RV64-2048-NEXT: ret entry: @@ -46,30 +46,30 @@ ; RV64-1024-LABEL: interleave512: ; RV64-1024: # %bb.0: # %entry ; RV64-1024-NEXT: li a3, 256 -; RV64-1024-NEXT: vsetvli zero, a3, e16, m4, ta, mu +; RV64-1024-NEXT: vsetvli zero, a3, e16, m4, ta, ma ; RV64-1024-NEXT: vle16.v v8, (a1) ; RV64-1024-NEXT: vle16.v v12, (a2) ; RV64-1024-NEXT: li a1, 512 -; RV64-1024-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; RV64-1024-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; RV64-1024-NEXT: vwaddu.vv v16, v8, v12 ; RV64-1024-NEXT: li a2, -1 ; RV64-1024-NEXT: vwmaccu.vx v16, a2, v12 -; RV64-1024-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; RV64-1024-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; RV64-1024-NEXT: vse16.v v16, (a0) ; RV64-1024-NEXT: ret ; ; RV64-2048-LABEL: interleave512: ; RV64-2048: # %bb.0: # %entry ; RV64-2048-NEXT: li a3, 256 -; RV64-2048-NEXT: vsetvli zero, a3, e16, m2, ta, mu +; RV64-2048-NEXT: vsetvli zero, a3, e16, m2, ta, ma ; RV64-2048-NEXT: vle16.v v8, (a1) ; RV64-2048-NEXT: vle16.v v10, (a2) ; RV64-2048-NEXT: li a1, 512 -; RV64-2048-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV64-2048-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; RV64-2048-NEXT: vwaddu.vv v12, v8, v10 ; RV64-2048-NEXT: li a2, -1 ; RV64-2048-NEXT: vwmaccu.vx v12, a2, v10 -; RV64-2048-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; RV64-2048-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; RV64-2048-NEXT: vse16.v v12, (a0) ; RV64-2048-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/legalize-load-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/legalize-load-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/legalize-load-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/legalize-load-sdnode.ll @@ -11,7 +11,7 @@ ; CHECK-NEXT: srli a1, a1, 3 ; CHECK-NEXT: slli a2, a1, 1 ; CHECK-NEXT: add a1, a2, a1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: ret %v = load , * %ptr @@ -25,7 +25,7 @@ ; CHECK-NEXT: srli a1, a1, 3 ; CHECK-NEXT: slli a2, a1, 2 ; CHECK-NEXT: add a1, a2, a1 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: ret %v = load , * %ptr @@ -38,7 +38,7 @@ ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: srli a3, a2, 3 ; CHECK-NEXT: sub a2, a2, a3 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vse16.v 
v8, (a1) ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/legalize-scalable-vectortype.ll b/llvm/test/CodeGen/RISCV/rvv/legalize-scalable-vectortype.ll --- a/llvm/test/CodeGen/RISCV/rvv/legalize-scalable-vectortype.ll +++ b/llvm/test/CodeGen/RISCV/rvv/legalize-scalable-vectortype.ll @@ -5,9 +5,9 @@ define @trunc_nxv4i32_to_nxv4i5( %a) { ; CHECK-LABEL: trunc_nxv4i32_to_nxv4i5: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v10, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: ret %v = trunc %a to @@ -17,9 +17,9 @@ define @trunc_nxv1i32_to_nxv1i5( %a) { ; CHECK-LABEL: trunc_nxv1i32_to_nxv1i5: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %v = trunc %a to diff --git a/llvm/test/CodeGen/RISCV/rvv/legalize-store-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/legalize-store-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/legalize-store-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/legalize-store-sdnode.ll @@ -11,7 +11,7 @@ ; CHECK-NEXT: srli a1, a1, 3 ; CHECK-NEXT: slli a2, a1, 1 ; CHECK-NEXT: add a1, a2, a1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret store %val, * %ptr @@ -24,7 +24,7 @@ ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a2, a1, 3 ; CHECK-NEXT: sub a1, a1, a2 -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vse64.v v8, (a0) ; CHECK-NEXT: ret store %val, * %ptr diff --git a/llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll b/llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll --- a/llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll +++ b/llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll @@ -9,7 +9,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vl1re16.v v8, (a1) ; CHECK-NEXT: vl1re16.v v9, (a2) -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: vs1r.v v8, (a0) ; CHECK-NEXT: ret @@ -25,7 +25,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vl2re16.v v8, (a1) ; CHECK-NEXT: vl2re16.v v10, (a2) -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v10 ; CHECK-NEXT: vs2r.v v8, (a0) ; CHECK-NEXT: ret @@ -41,7 +41,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vl4re16.v v8, (a1) ; CHECK-NEXT: vl4re16.v v12, (a2) -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v12 ; CHECK-NEXT: vs4r.v v8, (a0) ; CHECK-NEXT: ret @@ -57,7 +57,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re16.v v8, (a1) ; CHECK-NEXT: vl8re16.v v16, (a2) -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v16 ; CHECK-NEXT: vs8r.v v8, (a0) ; CHECK-NEXT: ret @@ -71,7 +71,7 @@ define void @vadd_vint16mf2( *%pc, *%pa, *%pb) nounwind { ; CHECK-LABEL: vadd_vint16mf2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v8, (a1) ; CHECK-NEXT: vle16.v v9, (a2) ; 
CHECK-NEXT: vadd.vv v8, v8, v9 @@ -87,7 +87,7 @@ define void @vadd_vint16mf4( *%pc, *%pa, *%pb) nounwind { ; CHECK-LABEL: vadd_vint16mf4: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a1) ; CHECK-NEXT: vle16.v v9, (a2) ; CHECK-NEXT: vadd.vv v8, v8, v9 diff --git a/llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll b/llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll --- a/llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll @@ -9,7 +9,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vl1re32.v v8, (a1) ; CHECK-NEXT: vl1re32.v v9, (a2) -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: vs1r.v v8, (a0) ; CHECK-NEXT: ret @@ -25,7 +25,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vl2re32.v v8, (a1) ; CHECK-NEXT: vl2re32.v v10, (a2) -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v10 ; CHECK-NEXT: vs2r.v v8, (a0) ; CHECK-NEXT: ret @@ -41,7 +41,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vl4re32.v v8, (a1) ; CHECK-NEXT: vl4re32.v v12, (a2) -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v12 ; CHECK-NEXT: vs4r.v v8, (a0) ; CHECK-NEXT: ret @@ -57,7 +57,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re32.v v8, (a1) ; CHECK-NEXT: vl8re32.v v16, (a2) -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v16 ; CHECK-NEXT: vs8r.v v8, (a0) ; CHECK-NEXT: ret @@ -71,7 +71,7 @@ define void @vadd_vint32mf2( *%pc, *%pa, *%pb) nounwind { ; CHECK-LABEL: vadd_vint32mf2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: vle32.v v9, (a2) ; CHECK-NEXT: vadd.vv v8, v8, v9 diff --git a/llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll b/llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll --- a/llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll @@ -9,7 +9,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vl1re64.v v8, (a1) ; CHECK-NEXT: vl1re64.v v9, (a2) -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: vs1r.v v8, (a0) ; CHECK-NEXT: ret @@ -25,7 +25,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vl2re64.v v8, (a1) ; CHECK-NEXT: vl2re64.v v10, (a2) -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v10 ; CHECK-NEXT: vs2r.v v8, (a0) ; CHECK-NEXT: ret @@ -41,7 +41,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vl4re64.v v8, (a1) ; CHECK-NEXT: vl4re64.v v12, (a2) -; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v12 ; CHECK-NEXT: vs4r.v v8, (a0) ; CHECK-NEXT: ret @@ -57,7 +57,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re64.v v8, (a1) ; CHECK-NEXT: vl8re64.v v16, (a2) -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v16 ; CHECK-NEXT: vs8r.v v8, (a0) ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/load-add-store-8.ll b/llvm/test/CodeGen/RISCV/rvv/load-add-store-8.ll --- a/llvm/test/CodeGen/RISCV/rvv/load-add-store-8.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/load-add-store-8.ll @@ -9,7 +9,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vl1r.v v8, (a1) ; CHECK-NEXT: vl1r.v v9, (a2) -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: vs1r.v v8, (a0) ; CHECK-NEXT: ret @@ -25,7 +25,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vl2r.v v8, (a1) ; CHECK-NEXT: vl2r.v v10, (a2) -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v10 ; CHECK-NEXT: vs2r.v v8, (a0) ; CHECK-NEXT: ret @@ -41,7 +41,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vl4r.v v8, (a1) ; CHECK-NEXT: vl4r.v v12, (a2) -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v12 ; CHECK-NEXT: vs4r.v v8, (a0) ; CHECK-NEXT: ret @@ -57,7 +57,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vl8r.v v8, (a1) ; CHECK-NEXT: vl8r.v v16, (a2) -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v16 ; CHECK-NEXT: vs8r.v v8, (a0) ; CHECK-NEXT: ret @@ -71,7 +71,7 @@ define void @vadd_vint8mf2( *%pc, *%pa, *%pb) nounwind { ; CHECK-LABEL: vadd_vint8mf2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v8, (a1) ; CHECK-NEXT: vle8.v v9, (a2) ; CHECK-NEXT: vadd.vv v8, v8, v9 @@ -87,7 +87,7 @@ define void @vadd_vint8mf4( *%pc, *%pa, *%pb) nounwind { ; CHECK-LABEL: vadd_vint8mf4: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v8, (a1) ; CHECK-NEXT: vle8.v v9, (a2) ; CHECK-NEXT: vadd.vv v8, v8, v9 @@ -103,7 +103,7 @@ define void @vadd_vint8mf8( *%pc, *%pa, *%pb) nounwind { ; CHECK-LABEL: vadd_vint8mf8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v8, (a1) ; CHECK-NEXT: vle8.v v9, (a2) ; CHECK-NEXT: vadd.vv v8, v8, v9 diff --git a/llvm/test/CodeGen/RISCV/rvv/load-mask.ll b/llvm/test/CodeGen/RISCV/rvv/load-mask.ll --- a/llvm/test/CodeGen/RISCV/rvv/load-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/load-mask.ll @@ -7,7 +7,7 @@ define void @test_load_mask_64(* %pa, * %pb) { ; CHECK-LABEL: test_load_mask_64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma ; CHECK-NEXT: vlm.v v8, (a0) ; CHECK-NEXT: vsm.v v8, (a1) ; CHECK-NEXT: ret @@ -19,7 +19,7 @@ define void @test_load_mask_32(* %pa, * %pb) { ; CHECK-LABEL: test_load_mask_32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, m4, ta, ma ; CHECK-NEXT: vlm.v v8, (a0) ; CHECK-NEXT: vsm.v v8, (a1) ; CHECK-NEXT: ret @@ -31,7 +31,7 @@ define void @test_load_mask_16(* %pa, * %pb) { ; CHECK-LABEL: test_load_mask_16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, m2, ta, ma ; CHECK-NEXT: vlm.v v8, (a0) ; CHECK-NEXT: vsm.v v8, (a1) ; CHECK-NEXT: ret @@ -43,7 +43,7 @@ define void @test_load_mask_8(* %pa, * %pb) { ; CHECK-LABEL: test_load_mask_8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma ; CHECK-NEXT: vlm.v v8, (a0) ; CHECK-NEXT: vsm.v v8, (a1) ; CHECK-NEXT: ret @@ -55,7 +55,7 @@ define void @test_load_mask_4(* %pa, * %pb) { ; CHECK-LABEL: test_load_mask_4: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma ; CHECK-NEXT: vlm.v v8, (a0) ; CHECK-NEXT: vsm.v v8, (a1) ; CHECK-NEXT: ret @@ -67,7 +67,7 @@ define void @test_load_mask_2(* %pa, * %pb) { ; CHECK-LABEL: test_load_mask_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, ma ; CHECK-NEXT: vlm.v v8, (a0) ; CHECK-NEXT: vsm.v v8, (a1) ; CHECK-NEXT: ret @@ -79,7 +79,7 @@ define void @test_load_mask_1(* %pa, * %pb) { ; CHECK-LABEL: test_load_mask_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma ; CHECK-NEXT: vlm.v v8, (a0) ; CHECK-NEXT: vsm.v v8, (a1) ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/localvar.ll b/llvm/test/CodeGen/RISCV/rvv/localvar.ll --- a/llvm/test/CodeGen/RISCV/rvv/localvar.ll +++ b/llvm/test/CodeGen/RISCV/rvv/localvar.ll @@ -13,7 +13,7 @@ ; RV64IV-NEXT: csrr a0, vlenb ; RV64IV-NEXT: add a0, sp, a0 ; RV64IV-NEXT: addi a0, a0, 16 -; RV64IV-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; RV64IV-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; RV64IV-NEXT: vle8.v v8, (a0) ; RV64IV-NEXT: addi a0, sp, 16 ; RV64IV-NEXT: vle8.v v8, (a0) diff --git a/llvm/test/CodeGen/RISCV/rvv/marith-vp.ll b/llvm/test/CodeGen/RISCV/rvv/marith-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/marith-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/marith-vp.ll @@ -9,7 +9,7 @@ define <1 x i1> @and_v1i1(<1 x i1> %b, <1 x i1> %c, <1 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: and_v1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmand.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <1 x i1> @llvm.vp.and.v1i1(<1 x i1> %b, <1 x i1> %c, <1 x i1> %a, i32 %evl) @@ -21,7 +21,7 @@ define <2 x i1> @and_v2i1(<2 x i1> %b, <2 x i1> %c, <2 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: and_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmand.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <2 x i1> @llvm.vp.and.v2i1(<2 x i1> %b, <2 x i1> %c, <2 x i1> %a, i32 %evl) @@ -33,7 +33,7 @@ define <4 x i1> @and_v4i1(<4 x i1> %b, <4 x i1> %c, <4 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: and_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmand.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <4 x i1> @llvm.vp.and.v4i1(<4 x i1> %b, <4 x i1> %c, <4 x i1> %a, i32 %evl) @@ -45,7 +45,7 @@ define <8 x i1> @and_v8i1(<8 x i1> %b, <8 x i1> %c, <8 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: and_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmand.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <8 x i1> @llvm.vp.and.v8i1(<8 x i1> %b, <8 x i1> %c, <8 x i1> %a, i32 %evl) @@ -57,7 +57,7 @@ define <16 x i1> @and_v16i1(<16 x i1> %b, <16 x i1> %c, <16 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: and_v16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmand.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <16 x i1> @llvm.vp.and.v16i1(<16 x i1> %b, <16 x i1> %c, <16 x i1> %a, i32 %evl) @@ -69,7 +69,7 @@ define @and_nxv1i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: and_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, 
ta, ma ; CHECK-NEXT: vmand.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.and.nxv1i1( %b, %c, %a, i32 %evl) @@ -81,7 +81,7 @@ define @and_nxv2i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: and_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmand.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.and.nxv2i1( %b, %c, %a, i32 %evl) @@ -93,7 +93,7 @@ define @and_nxv4i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: and_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmand.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.and.nxv4i1( %b, %c, %a, i32 %evl) @@ -105,7 +105,7 @@ define @and_nxv8i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: and_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmand.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.and.nxv8i1( %b, %c, %a, i32 %evl) @@ -117,7 +117,7 @@ define @and_nxv16i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: and_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmand.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.and.nxv16i1( %b, %c, %a, i32 %evl) @@ -129,7 +129,7 @@ define @and_nxv32i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: and_nxv32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmand.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.and.nxv32i1( %b, %c, %a, i32 %evl) @@ -141,7 +141,7 @@ define @and_nxv64i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: and_nxv64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmand.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.and.nxv64i1( %b, %c, %a, i32 %evl) @@ -153,7 +153,7 @@ define <1 x i1> @or_v1i1(<1 x i1> %b, <1 x i1> %c, <1 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: or_v1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <1 x i1> @llvm.vp.or.v1i1(<1 x i1> %b, <1 x i1> %c, <1 x i1> %a, i32 %evl) @@ -165,7 +165,7 @@ define <2 x i1> @or_v2i1(<2 x i1> %b, <2 x i1> %c, <2 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: or_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <2 x i1> @llvm.vp.or.v2i1(<2 x i1> %b, <2 x i1> %c, <2 x i1> %a, i32 %evl) @@ -177,7 +177,7 @@ define <4 x i1> @or_v4i1(<4 x i1> %b, <4 x i1> %c, <4 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: or_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <4 x i1> @llvm.vp.or.v4i1(<4 x i1> %b, <4 x i1> %c, <4 x i1> %a, i32 %evl) @@ -189,7 +189,7 @@ define <8 x i1> @or_v8i1(<8 x i1> %b, <8 x i1> %c, <8 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: or_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <8 x i1> @llvm.vp.or.v8i1(<8 x i1> %b, <8 x i1> %c, <8 x i1> %a, i32 %evl) @@ 
-201,7 +201,7 @@ define <16 x i1> @or_v16i1(<16 x i1> %b, <16 x i1> %c, <16 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: or_v16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <16 x i1> @llvm.vp.or.v16i1(<16 x i1> %b, <16 x i1> %c, <16 x i1> %a, i32 %evl) @@ -213,7 +213,7 @@ define @or_nxv1i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: or_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.or.nxv1i1( %b, %c, %a, i32 %evl) @@ -225,7 +225,7 @@ define @or_nxv2i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: or_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.or.nxv2i1( %b, %c, %a, i32 %evl) @@ -237,7 +237,7 @@ define @or_nxv4i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: or_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.or.nxv4i1( %b, %c, %a, i32 %evl) @@ -249,7 +249,7 @@ define @or_nxv8i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: or_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.or.nxv8i1( %b, %c, %a, i32 %evl) @@ -261,7 +261,7 @@ define @or_nxv16i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: or_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.or.nxv16i1( %b, %c, %a, i32 %evl) @@ -273,7 +273,7 @@ define @or_nxv32i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: or_nxv32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.or.nxv32i1( %b, %c, %a, i32 %evl) @@ -285,7 +285,7 @@ define @or_nxv64i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: or_nxv64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.or.nxv64i1( %b, %c, %a, i32 %evl) @@ -297,7 +297,7 @@ define <1 x i1> @xor_v1i1(<1 x i1> %b, <1 x i1> %c, <1 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_v1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <1 x i1> @llvm.vp.xor.v1i1(<1 x i1> %b, <1 x i1> %c, <1 x i1> %a, i32 %evl) @@ -309,7 +309,7 @@ define <2 x i1> @xor_v2i1(<2 x i1> %b, <2 x i1> %c, <2 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_v2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <2 x i1> @llvm.vp.xor.v2i1(<2 x i1> %b, <2 x i1> %c, <2 x i1> %a, i32 %evl) @@ -321,7 +321,7 @@ define <4 x i1> @xor_v4i1(<4 x i1> %b, <4 x i1> %c, <4 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_v4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, 
mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <4 x i1> @llvm.vp.xor.v4i1(<4 x i1> %b, <4 x i1> %c, <4 x i1> %a, i32 %evl) @@ -333,7 +333,7 @@ define <8 x i1> @xor_v8i1(<8 x i1> %b, <8 x i1> %c, <8 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_v8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <8 x i1> @llvm.vp.xor.v8i1(<8 x i1> %b, <8 x i1> %c, <8 x i1> %a, i32 %evl) @@ -345,7 +345,7 @@ define <16 x i1> @xor_v16i1(<16 x i1> %b, <16 x i1> %c, <16 x i1> %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_v16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call <16 x i1> @llvm.vp.xor.v16i1(<16 x i1> %b, <16 x i1> %c, <16 x i1> %a, i32 %evl) @@ -357,7 +357,7 @@ define @xor_nxv1i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv1i1( %b, %c, %a, i32 %evl) @@ -369,7 +369,7 @@ define @xor_nxv2i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv2i1( %b, %c, %a, i32 %evl) @@ -381,7 +381,7 @@ define @xor_nxv4i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv4i1( %b, %c, %a, i32 %evl) @@ -393,7 +393,7 @@ define @xor_nxv8i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv8i1( %b, %c, %a, i32 %evl) @@ -405,7 +405,7 @@ define @xor_nxv16i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv16i1( %b, %c, %a, i32 %evl) @@ -417,7 +417,7 @@ define @xor_nxv32i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_nxv32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv32i1( %b, %c, %a, i32 %evl) @@ -429,7 +429,7 @@ define @xor_nxv64i1( %b, %c, %a, i32 zeroext %evl) { ; CHECK-LABEL: xor_nxv64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.xor.nxv64i1( %b, %c, %a, i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/mask-exts-truncs-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/mask-exts-truncs-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/mask-exts-truncs-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/mask-exts-truncs-rv32.ll @@ -4,7 +4,7 @@ define @sext_nxv1i1_nxv1i8( %v) { ; CHECK-LABEL: sext_nxv1i1_nxv1i8: ; CHECK: # 
%bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -15,7 +15,7 @@ define @zext_nxv1i1_nxv1i8( %v) { ; CHECK-LABEL: zext_nxv1i1_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -26,7 +26,7 @@ define @trunc_nxv1i8_nxv1i1( %v) { ; CHECK-LABEL: trunc_nxv1i8_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -37,7 +37,7 @@ define @sext_nxv2i1_nxv2i8( %v) { ; CHECK-LABEL: sext_nxv2i1_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -48,7 +48,7 @@ define @zext_nxv2i1_nxv2i8( %v) { ; CHECK-LABEL: zext_nxv2i1_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -59,7 +59,7 @@ define @trunc_nxv2i8_nxv2i1( %v) { ; CHECK-LABEL: trunc_nxv2i8_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -70,7 +70,7 @@ define @sext_nxv4i1_nxv4i8( %v) { ; CHECK-LABEL: sext_nxv4i1_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -81,7 +81,7 @@ define @zext_nxv4i1_nxv4i8( %v) { ; CHECK-LABEL: zext_nxv4i1_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -92,7 +92,7 @@ define @trunc_nxv4i8_nxv4i1( %v) { ; CHECK-LABEL: trunc_nxv4i8_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -103,7 +103,7 @@ define @sext_nxv8i1_nxv8i8( %v) { ; CHECK-LABEL: sext_nxv8i1_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -114,7 +114,7 @@ define @zext_nxv8i1_nxv8i8( %v) { ; CHECK-LABEL: zext_nxv8i1_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -125,7 +125,7 @@ define @trunc_nxv8i8_nxv8i1( %v) { ; CHECK-LABEL: trunc_nxv8i8_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -136,7 +136,7 @@ define @sext_nxv16i1_nxv16i8( %v) { ; CHECK-LABEL: sext_nxv16i1_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, 
e8, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -147,7 +147,7 @@ define @zext_nxv16i1_nxv16i8( %v) { ; CHECK-LABEL: zext_nxv16i1_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -158,7 +158,7 @@ define @trunc_nxv16i8_nxv16i1( %v) { ; CHECK-LABEL: trunc_nxv16i8_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -169,7 +169,7 @@ define @sext_nxv32i1_nxv32i8( %v) { ; CHECK-LABEL: sext_nxv32i1_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -180,7 +180,7 @@ define @zext_nxv32i1_nxv32i8( %v) { ; CHECK-LABEL: zext_nxv32i1_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -191,7 +191,7 @@ define @trunc_nxv32i8_nxv32i1( %v) { ; CHECK-LABEL: trunc_nxv32i8_nxv32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -202,7 +202,7 @@ define @sext_nxv64i1_nxv64i8( %v) { ; CHECK-LABEL: sext_nxv64i1_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -213,7 +213,7 @@ define @zext_nxv64i1_nxv64i8( %v) { ; CHECK-LABEL: zext_nxv64i1_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -224,7 +224,7 @@ define @trunc_nxv64i8_nxv64i1( %v) { ; CHECK-LABEL: trunc_nxv64i8_nxv64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -235,7 +235,7 @@ define @sext_nxv1i1_nxv1i16( %v) { ; CHECK-LABEL: sext_nxv1i1_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -246,7 +246,7 @@ define @zext_nxv1i1_nxv1i16( %v) { ; CHECK-LABEL: zext_nxv1i1_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -257,7 +257,7 @@ define @trunc_nxv1i16_nxv1i1( %v) { ; CHECK-LABEL: trunc_nxv1i16_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -268,7 +268,7 @@ define @sext_nxv2i1_nxv2i16( %v) { ; CHECK-LABEL: sext_nxv2i1_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmv.v.i 
v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -279,7 +279,7 @@ define @zext_nxv2i1_nxv2i16( %v) { ; CHECK-LABEL: zext_nxv2i1_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -290,7 +290,7 @@ define @trunc_nxv2i16_nxv2i1( %v) { ; CHECK-LABEL: trunc_nxv2i16_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -301,7 +301,7 @@ define @sext_nxv4i1_nxv4i16( %v) { ; CHECK-LABEL: sext_nxv4i1_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -312,7 +312,7 @@ define @zext_nxv4i1_nxv4i16( %v) { ; CHECK-LABEL: zext_nxv4i1_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -323,7 +323,7 @@ define @trunc_nxv4i16_nxv4i1( %v) { ; CHECK-LABEL: trunc_nxv4i16_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -334,7 +334,7 @@ define @sext_nxv8i1_nxv8i16( %v) { ; CHECK-LABEL: sext_nxv8i1_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -345,7 +345,7 @@ define @zext_nxv8i1_nxv8i16( %v) { ; CHECK-LABEL: zext_nxv8i1_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -356,7 +356,7 @@ define @trunc_nxv8i16_nxv8i1( %v) { ; CHECK-LABEL: trunc_nxv8i16_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -367,7 +367,7 @@ define @sext_nxv16i1_nxv16i16( %v) { ; CHECK-LABEL: sext_nxv16i1_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -378,7 +378,7 @@ define @zext_nxv16i1_nxv16i16( %v) { ; CHECK-LABEL: zext_nxv16i1_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -389,7 +389,7 @@ define @trunc_nxv16i16_nxv16i1( %v) { ; CHECK-LABEL: trunc_nxv16i16_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -400,7 +400,7 @@ define @sext_nxv32i1_nxv32i16( %v) { ; CHECK-LABEL: sext_nxv32i1_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: 
vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -411,7 +411,7 @@ define @zext_nxv32i1_nxv32i16( %v) { ; CHECK-LABEL: zext_nxv32i1_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -422,7 +422,7 @@ define @trunc_nxv32i16_nxv32i1( %v) { ; CHECK-LABEL: trunc_nxv32i16_nxv32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -433,7 +433,7 @@ define @sext_nxv1i1_nxv1i32( %v) { ; CHECK-LABEL: sext_nxv1i1_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -444,7 +444,7 @@ define @zext_nxv1i1_nxv1i32( %v) { ; CHECK-LABEL: zext_nxv1i1_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -455,7 +455,7 @@ define @trunc_nxv1i32_nxv1i1( %v) { ; CHECK-LABEL: trunc_nxv1i32_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -466,7 +466,7 @@ define @sext_nxv2i1_nxv2i32( %v) { ; CHECK-LABEL: sext_nxv2i1_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -477,7 +477,7 @@ define @zext_nxv2i1_nxv2i32( %v) { ; CHECK-LABEL: zext_nxv2i1_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -488,7 +488,7 @@ define @trunc_nxv2i32_nxv2i1( %v) { ; CHECK-LABEL: trunc_nxv2i32_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -499,7 +499,7 @@ define @sext_nxv4i1_nxv4i32( %v) { ; CHECK-LABEL: sext_nxv4i1_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -510,7 +510,7 @@ define @zext_nxv4i1_nxv4i32( %v) { ; CHECK-LABEL: zext_nxv4i1_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -521,7 +521,7 @@ define @trunc_nxv4i32_nxv4i1( %v) { ; CHECK-LABEL: trunc_nxv4i32_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -532,7 +532,7 @@ define @sext_nxv8i1_nxv8i32( %v) { ; CHECK-LABEL: sext_nxv8i1_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; 
CHECK-NEXT: ret @@ -543,7 +543,7 @@ define @zext_nxv8i1_nxv8i32( %v) { ; CHECK-LABEL: zext_nxv8i1_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -554,7 +554,7 @@ define @trunc_nxv8i32_nxv8i1( %v) { ; CHECK-LABEL: trunc_nxv8i32_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -565,7 +565,7 @@ define @sext_nxv16i1_nxv16i32( %v) { ; CHECK-LABEL: sext_nxv16i1_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -576,7 +576,7 @@ define @zext_nxv16i1_nxv16i32( %v) { ; CHECK-LABEL: zext_nxv16i1_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -587,7 +587,7 @@ define @trunc_nxv16i32_nxv16i1( %v) { ; CHECK-LABEL: trunc_nxv16i32_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -598,7 +598,7 @@ define @sext_nxv1i1_nxv1i64( %v) { ; CHECK-LABEL: sext_nxv1i1_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -609,7 +609,7 @@ define @zext_nxv1i1_nxv1i64( %v) { ; CHECK-LABEL: zext_nxv1i1_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -620,7 +620,7 @@ define @trunc_nxv1i64_nxv1i1( %v) { ; CHECK-LABEL: trunc_nxv1i64_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -631,7 +631,7 @@ define @sext_nxv2i1_nxv2i64( %v) { ; CHECK-LABEL: sext_nxv2i1_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -642,7 +642,7 @@ define @zext_nxv2i1_nxv2i64( %v) { ; CHECK-LABEL: zext_nxv2i1_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -653,7 +653,7 @@ define @trunc_nxv2i64_nxv2i1( %v) { ; CHECK-LABEL: trunc_nxv2i64_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -664,7 +664,7 @@ define @sext_nxv4i1_nxv4i64( %v) { ; CHECK-LABEL: sext_nxv4i1_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -675,7 +675,7 
@@ define @zext_nxv4i1_nxv4i64( %v) { ; CHECK-LABEL: zext_nxv4i1_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -686,7 +686,7 @@ define @trunc_nxv4i64_nxv4i1( %v) { ; CHECK-LABEL: trunc_nxv4i64_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -697,7 +697,7 @@ define @sext_nxv8i1_nxv8i64( %v) { ; CHECK-LABEL: sext_nxv8i1_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -708,7 +708,7 @@ define @zext_nxv8i1_nxv8i64( %v) { ; CHECK-LABEL: zext_nxv8i1_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -719,7 +719,7 @@ define @trunc_nxv8i64_nxv8i1( %v) { ; CHECK-LABEL: trunc_nxv8i64_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/mask-exts-truncs-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/mask-exts-truncs-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/mask-exts-truncs-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/mask-exts-truncs-rv64.ll @@ -4,7 +4,7 @@ define @sext_nxv1i1_nxv1i8( %v) { ; CHECK-LABEL: sext_nxv1i1_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -15,7 +15,7 @@ define @zext_nxv1i1_nxv1i8( %v) { ; CHECK-LABEL: zext_nxv1i1_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -26,7 +26,7 @@ define @trunc_nxv1i8_nxv1i1( %v) { ; CHECK-LABEL: trunc_nxv1i8_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -37,7 +37,7 @@ define @sext_nxv2i1_nxv2i8( %v) { ; CHECK-LABEL: sext_nxv2i1_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -48,7 +48,7 @@ define @zext_nxv2i1_nxv2i8( %v) { ; CHECK-LABEL: zext_nxv2i1_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -59,7 +59,7 @@ define @trunc_nxv2i8_nxv2i1( %v) { ; CHECK-LABEL: trunc_nxv2i8_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -70,7 +70,7 @@ define @sext_nxv4i1_nxv4i8( %v) { ; CHECK-LABEL: sext_nxv4i1_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, 
mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -81,7 +81,7 @@ define @zext_nxv4i1_nxv4i8( %v) { ; CHECK-LABEL: zext_nxv4i1_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -92,7 +92,7 @@ define @trunc_nxv4i8_nxv4i1( %v) { ; CHECK-LABEL: trunc_nxv4i8_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -103,7 +103,7 @@ define @sext_nxv8i1_nxv8i8( %v) { ; CHECK-LABEL: sext_nxv8i1_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -114,7 +114,7 @@ define @zext_nxv8i1_nxv8i8( %v) { ; CHECK-LABEL: zext_nxv8i1_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -125,7 +125,7 @@ define @trunc_nxv8i8_nxv8i1( %v) { ; CHECK-LABEL: trunc_nxv8i8_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -136,7 +136,7 @@ define @sext_nxv16i1_nxv16i8( %v) { ; CHECK-LABEL: sext_nxv16i1_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -147,7 +147,7 @@ define @zext_nxv16i1_nxv16i8( %v) { ; CHECK-LABEL: zext_nxv16i1_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -158,7 +158,7 @@ define @trunc_nxv16i8_nxv16i1( %v) { ; CHECK-LABEL: trunc_nxv16i8_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -169,7 +169,7 @@ define @sext_nxv32i1_nxv32i8( %v) { ; CHECK-LABEL: sext_nxv32i1_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -180,7 +180,7 @@ define @zext_nxv32i1_nxv32i8( %v) { ; CHECK-LABEL: zext_nxv32i1_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -191,7 +191,7 @@ define @trunc_nxv32i8_nxv32i1( %v) { ; CHECK-LABEL: trunc_nxv32i8_nxv32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -202,7 +202,7 @@ define @sext_nxv64i1_nxv64i8( %v) { ; CHECK-LABEL: sext_nxv64i1_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; 
CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -213,7 +213,7 @@ define @zext_nxv64i1_nxv64i8( %v) { ; CHECK-LABEL: zext_nxv64i1_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -224,7 +224,7 @@ define @trunc_nxv64i8_nxv64i1( %v) { ; CHECK-LABEL: trunc_nxv64i8_nxv64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -235,7 +235,7 @@ define @sext_nxv1i1_nxv1i16( %v) { ; CHECK-LABEL: sext_nxv1i1_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -246,7 +246,7 @@ define @zext_nxv1i1_nxv1i16( %v) { ; CHECK-LABEL: zext_nxv1i1_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -257,7 +257,7 @@ define @trunc_nxv1i16_nxv1i1( %v) { ; CHECK-LABEL: trunc_nxv1i16_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -268,7 +268,7 @@ define @sext_nxv2i1_nxv2i16( %v) { ; CHECK-LABEL: sext_nxv2i1_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -279,7 +279,7 @@ define @zext_nxv2i1_nxv2i16( %v) { ; CHECK-LABEL: zext_nxv2i1_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -290,7 +290,7 @@ define @trunc_nxv2i16_nxv2i1( %v) { ; CHECK-LABEL: trunc_nxv2i16_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -301,7 +301,7 @@ define @sext_nxv4i1_nxv4i16( %v) { ; CHECK-LABEL: sext_nxv4i1_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -312,7 +312,7 @@ define @zext_nxv4i1_nxv4i16( %v) { ; CHECK-LABEL: zext_nxv4i1_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -323,7 +323,7 @@ define @trunc_nxv4i16_nxv4i1( %v) { ; CHECK-LABEL: trunc_nxv4i16_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -334,7 +334,7 @@ define @sext_nxv8i1_nxv8i16( %v) { ; CHECK-LABEL: sext_nxv8i1_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; 
CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -345,7 +345,7 @@ define @zext_nxv8i1_nxv8i16( %v) { ; CHECK-LABEL: zext_nxv8i1_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -356,7 +356,7 @@ define @trunc_nxv8i16_nxv8i1( %v) { ; CHECK-LABEL: trunc_nxv8i16_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -367,7 +367,7 @@ define @sext_nxv16i1_nxv16i16( %v) { ; CHECK-LABEL: sext_nxv16i1_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -378,7 +378,7 @@ define @zext_nxv16i1_nxv16i16( %v) { ; CHECK-LABEL: zext_nxv16i1_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -389,7 +389,7 @@ define @trunc_nxv16i16_nxv16i1( %v) { ; CHECK-LABEL: trunc_nxv16i16_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -400,7 +400,7 @@ define @sext_nxv32i1_nxv32i16( %v) { ; CHECK-LABEL: sext_nxv32i1_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -411,7 +411,7 @@ define @zext_nxv32i1_nxv32i16( %v) { ; CHECK-LABEL: zext_nxv32i1_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -422,7 +422,7 @@ define @trunc_nxv32i16_nxv32i1( %v) { ; CHECK-LABEL: trunc_nxv32i16_nxv32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -433,7 +433,7 @@ define @sext_nxv1i1_nxv1i32( %v) { ; CHECK-LABEL: sext_nxv1i1_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -444,7 +444,7 @@ define @zext_nxv1i1_nxv1i32( %v) { ; CHECK-LABEL: zext_nxv1i1_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -455,7 +455,7 @@ define @trunc_nxv1i32_nxv1i1( %v) { ; CHECK-LABEL: trunc_nxv1i32_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -466,7 +466,7 @@ define @sext_nxv2i1_nxv2i32( %v) { ; CHECK-LABEL: sext_nxv2i1_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: 
vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -477,7 +477,7 @@ define @zext_nxv2i1_nxv2i32( %v) { ; CHECK-LABEL: zext_nxv2i1_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -488,7 +488,7 @@ define @trunc_nxv2i32_nxv2i1( %v) { ; CHECK-LABEL: trunc_nxv2i32_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -499,7 +499,7 @@ define @sext_nxv4i1_nxv4i32( %v) { ; CHECK-LABEL: sext_nxv4i1_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -510,7 +510,7 @@ define @zext_nxv4i1_nxv4i32( %v) { ; CHECK-LABEL: zext_nxv4i1_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -521,7 +521,7 @@ define @trunc_nxv4i32_nxv4i1( %v) { ; CHECK-LABEL: trunc_nxv4i32_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -532,7 +532,7 @@ define @sext_nxv8i1_nxv8i32( %v) { ; CHECK-LABEL: sext_nxv8i1_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -543,7 +543,7 @@ define @zext_nxv8i1_nxv8i32( %v) { ; CHECK-LABEL: zext_nxv8i1_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -554,7 +554,7 @@ define @trunc_nxv8i32_nxv8i1( %v) { ; CHECK-LABEL: trunc_nxv8i32_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -565,7 +565,7 @@ define @sext_nxv16i1_nxv16i32( %v) { ; CHECK-LABEL: sext_nxv16i1_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -576,7 +576,7 @@ define @zext_nxv16i1_nxv16i32( %v) { ; CHECK-LABEL: zext_nxv16i1_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -587,7 +587,7 @@ define @trunc_nxv16i32_nxv16i1( %v) { ; CHECK-LABEL: trunc_nxv16i32_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -598,7 +598,7 @@ define @sext_nxv1i1_nxv1i64( %v) { ; CHECK-LABEL: sext_nxv1i1_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; 
CHECK-NEXT: ret @@ -609,7 +609,7 @@ define @zext_nxv1i1_nxv1i64( %v) { ; CHECK-LABEL: zext_nxv1i1_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -620,7 +620,7 @@ define @trunc_nxv1i64_nxv1i1( %v) { ; CHECK-LABEL: trunc_nxv1i64_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -631,7 +631,7 @@ define @sext_nxv2i1_nxv2i64( %v) { ; CHECK-LABEL: sext_nxv2i1_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -642,7 +642,7 @@ define @zext_nxv2i1_nxv2i64( %v) { ; CHECK-LABEL: zext_nxv2i1_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -653,7 +653,7 @@ define @trunc_nxv2i64_nxv2i1( %v) { ; CHECK-LABEL: trunc_nxv2i64_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -664,7 +664,7 @@ define @sext_nxv4i1_nxv4i64( %v) { ; CHECK-LABEL: sext_nxv4i1_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -675,7 +675,7 @@ define @zext_nxv4i1_nxv4i64( %v) { ; CHECK-LABEL: zext_nxv4i1_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -686,7 +686,7 @@ define @trunc_nxv4i64_nxv4i1( %v) { ; CHECK-LABEL: trunc_nxv4i64_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -697,7 +697,7 @@ define @sext_nxv8i1_nxv8i64( %v) { ; CHECK-LABEL: sext_nxv8i1_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -708,7 +708,7 @@ define @zext_nxv8i1_nxv8i64( %v) { ; CHECK-LABEL: zext_nxv8i1_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -719,7 +719,7 @@ define @trunc_nxv8i64_nxv8i1( %v) { ; CHECK-LABEL: trunc_nxv8i64_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/mask-reg-alloc.mir b/llvm/test/CodeGen/RISCV/rvv/mask-reg-alloc.mir --- a/llvm/test/CodeGen/RISCV/rvv/mask-reg-alloc.mir +++ b/llvm/test/CodeGen/RISCV/rvv/mask-reg-alloc.mir @@ -16,7 +16,7 @@ ; CHECK-LABEL: name: mask_reg_alloc ; CHECK: liveins: $v0, $v1, $v2, $v3 ; CHECK-NEXT: {{ 
$}} - ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 1, 64 /* e8, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 1, 192 /* e8, m1, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: renamable $v8 = PseudoVMERGE_VIM_M1 killed renamable $v2, 1, killed renamable $v0, 1, 3 /* e8 */, implicit $vl, implicit $vtype ; CHECK-NEXT: renamable $v0 = COPY killed renamable $v1 ; CHECK-NEXT: renamable $v9 = PseudoVMERGE_VIM_M1 killed renamable $v3, 1, killed renamable $v0, 1, 3 /* e8 */, implicit $vl, implicit $vtype diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll b/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll --- a/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll @@ -255,7 +255,7 @@ define @masked_load_allones_mask(* %a, %maskedoff) nounwind { ; CHECK-LABEL: masked_load_allones_mask: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: ret %insert = insertelement poison, i1 1, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-store-fp.ll b/llvm/test/CodeGen/RISCV/rvv/masked-store-fp.ll --- a/llvm/test/CodeGen/RISCV/rvv/masked-store-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-store-fp.ll @@ -5,7 +5,7 @@ define void @masked_store_nxv1f16( %val, * %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.masked.store.nxv1f16.p0nxv1f16( %val, * %a, i32 2, %mask) @@ -16,7 +16,7 @@ define void @masked_store_nxv1f32( %val, * %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.masked.store.nxv1f32.p0nxv1f32( %val, * %a, i32 4, %mask) @@ -27,7 +27,7 @@ define void @masked_store_nxv1f64( %val, * %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vse64.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.masked.store.nxv1f64.p0nxv1f64( %val, * %a, i32 8, %mask) @@ -38,7 +38,7 @@ define void @masked_store_nxv2f16( %val, * %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.masked.store.nxv2f16.p0nxv2f16( %val, * %a, i32 2, %mask) @@ -49,7 +49,7 @@ define void @masked_store_nxv2f32( %val, * %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.masked.store.nxv2f32.p0nxv2f32( %val, * %a, i32 4, %mask) @@ -60,7 +60,7 @@ define void @masked_store_nxv2f64( %val, * %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; CHECK-NEXT: vse64.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.masked.store.nxv2f64.p0nxv2f64( %val, * %a, i32 8, %mask) @@ -71,7 +71,7 @@ define void 
@masked_store_nxv4f16( %val, * %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.masked.store.nxv4f16.p0nxv4f16( %val, * %a, i32 2, %mask) @@ -82,7 +82,7 @@ define void @masked_store_nxv4f32( %val, * %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.masked.store.nxv4f32.p0nxv4f32( %val, * %a, i32 4, %mask) @@ -93,7 +93,7 @@ define void @masked_store_nxv4f64( %val, * %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; CHECK-NEXT: vse64.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.masked.store.nxv4f64.p0nxv4f64( %val, * %a, i32 8, %mask) @@ -104,7 +104,7 @@ define void @masked_store_nxv8f16( %val, * %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.masked.store.nxv8f16.p0nxv8f16( %val, * %a, i32 2, %mask) @@ -115,7 +115,7 @@ define void @masked_store_nxv8f32( %val, * %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.masked.store.nxv8f32.p0nxv8f32( %val, * %a, i32 4, %mask) @@ -126,7 +126,7 @@ define void @masked_store_nxv8f64( %val, * %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; CHECK-NEXT: vse64.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.masked.store.nxv8f64.p0nxv8f64( %val, * %a, i32 8, %mask) @@ -137,7 +137,7 @@ define void @masked_store_nxv16f16( %val, * %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.masked.store.nxv16f16.p0nxv16f16( %val, * %a, i32 2, %mask) @@ -148,7 +148,7 @@ define void @masked_store_nxv16f32( %val, * %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.masked.store.nxv16f32.p0nxv16f32( %val, * %a, i32 4, %mask) @@ -159,7 +159,7 @@ define void @masked_store_nxv32f16( %val, * %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.masked.store.nxv32f16.p0nxv32f16( %val, * %a, i32 2, %mask) diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-store-int.ll b/llvm/test/CodeGen/RISCV/rvv/masked-store-int.ll --- a/llvm/test/CodeGen/RISCV/rvv/masked-store-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-store-int.ll @@ -5,7 +5,7 @@ define void @masked_store_nxv1i8( %val, 
* %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vse8.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.masked.store.v1i8.p0v1i8( %val, * %a, i32 1, %mask) @@ -16,7 +16,7 @@ define void @masked_store_nxv1i16( %val, * %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.masked.store.v1i16.p0v1i16( %val, * %a, i32 2, %mask) @@ -27,7 +27,7 @@ define void @masked_store_nxv1i32( %val, * %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.masked.store.v1i32.p0v1i32( %val, * %a, i32 4, %mask) @@ -38,7 +38,7 @@ define void @masked_store_nxv1i64( %val, * %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vse64.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.masked.store.v1i64.p0v1i64( %val, * %a, i32 8, %mask) @@ -49,7 +49,7 @@ define void @masked_store_nxv2i8( %val, * %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vse8.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.masked.store.v2i8.p0v2i8( %val, * %a, i32 1, %mask) @@ -60,7 +60,7 @@ define void @masked_store_nxv2i16( %val, * %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.masked.store.v2i16.p0v2i16( %val, * %a, i32 2, %mask) @@ -71,7 +71,7 @@ define void @masked_store_nxv2i32( %val, * %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.masked.store.v2i32.p0v2i32( %val, * %a, i32 4, %mask) @@ -82,7 +82,7 @@ define void @masked_store_nxv2i64( %val, * %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; CHECK-NEXT: vse64.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.masked.store.v2i64.p0v2i64( %val, * %a, i32 8, %mask) @@ -93,7 +93,7 @@ define void @masked_store_nxv4i8( %val, * %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vse8.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.masked.store.v4i8.p0v4i8( %val, * %a, i32 1, %mask) @@ -104,7 +104,7 @@ define void @masked_store_nxv4i16( %val, * %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.masked.store.v4i16.p0v4i16( %val, * %a, i32 2, %mask) @@ -115,7 
+115,7 @@ define void @masked_store_nxv4i32( %val, * %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.masked.store.v4i32.p0v4i32( %val, * %a, i32 4, %mask) @@ -126,7 +126,7 @@ define void @masked_store_nxv4i64( %val, * %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; CHECK-NEXT: vse64.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.masked.store.v4i64.p0v4i64( %val, * %a, i32 8, %mask) @@ -137,7 +137,7 @@ define void @masked_store_nxv8i8( %val, * %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vse8.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.masked.store.v8i8.p0v8i8( %val, * %a, i32 1, %mask) @@ -148,7 +148,7 @@ define void @masked_store_nxv8i16( %val, * %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.masked.store.v8i16.p0v8i16( %val, * %a, i32 2, %mask) @@ -159,7 +159,7 @@ define void @masked_store_nxv8i32( %val, * %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.masked.store.v8i32.p0v8i32( %val, * %a, i32 4, %mask) @@ -170,7 +170,7 @@ define void @masked_store_nxv8i64( %val, * %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; CHECK-NEXT: vse64.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.masked.store.v8i64.p0v8i64( %val, * %a, i32 8, %mask) @@ -181,7 +181,7 @@ define void @masked_store_nxv16i8( %val, * %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vse8.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.masked.store.v16i8.p0v16i8( %val, * %a, i32 1, %mask) @@ -192,7 +192,7 @@ define void @masked_store_nxv16i16( %val, * %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.masked.store.v16i16.p0v16i16( %val, * %a, i32 2, %mask) @@ -203,7 +203,7 @@ define void @masked_store_nxv16i32( %val, * %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.masked.store.v16i32.p0v16i32( %val, * %a, i32 4, %mask) @@ -214,7 +214,7 @@ define void @masked_store_nxv32i8( %val, * %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vse8.v v8, (a0), v0.t ; CHECK-NEXT: ret call void 
@llvm.masked.store.v32i8.p0v32i8( %val, * %a, i32 1, %mask) @@ -225,7 +225,7 @@ define void @masked_store_nxv32i16( %val, * %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.masked.store.v32i16.p0v32i16( %val, * %a, i32 2, %mask) @@ -236,7 +236,7 @@ define void @masked_store_nxv64i8( %val, * %a, %mask) nounwind { ; CHECK-LABEL: masked_store_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vse8.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.masked.store.v64i8.p0v64i8( %val, * %a, i32 4, %mask) @@ -255,7 +255,7 @@ define void @masked_store_allones_mask( %val, * %a) nounwind { ; CHECK-LABEL: masked_store_allones_mask: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret %insert = insertelement poison, i1 1, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll @@ -15,10 +15,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, a2, e64, m1, ta, mu ; CHECK-NEXT: slli a3, a3, 1 -; CHECK-NEXT: vsetvli zero, a3, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e32, m1, ta, ma ; CHECK-NEXT: vslide1down.vx v9, v9, a0 ; CHECK-NEXT: vslide1down.vx v9, v9, a1 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -37,10 +37,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, a2, e64, m1, ta, mu ; CHECK-NEXT: slli a3, a3, 1 -; CHECK-NEXT: vsetvli zero, a3, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e32, m1, ta, ma ; CHECK-NEXT: vslide1down.vx v9, v9, a0 ; CHECK-NEXT: vslide1down.vx v9, v9, a1 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -61,10 +61,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, a2, e64, m1, ta, mu ; CHECK-NEXT: slli a3, a3, 1 -; CHECK-NEXT: vsetvli zero, a3, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e32, m1, ta, ma ; CHECK-NEXT: vslide1down.vx v9, v9, a0 ; CHECK-NEXT: vslide1down.vx v9, v9, a1 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, tu, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -84,7 +84,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, a2, e64, m1, ta, mu ; CHECK-NEXT: slli a2, a2, 1 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: vslide1down.vx v8, v8, a1 ; CHECK-NEXT: ret @@ -104,7 +104,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, a2, e64, m1, ta, mu ; CHECK-NEXT: slli a2, a2, 1 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: vslide1down.vx v8, v8, a1 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/memory-args.ll b/llvm/test/CodeGen/RISCV/rvv/memory-args.ll --- a/llvm/test/CodeGen/RISCV/rvv/memory-args.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/memory-args.ll @@ -14,7 +14,7 @@ ; RV64IV: # %bb.0: ; RV64IV-NEXT: vl8r.v v24, (a0) ; RV64IV-NEXT: li a0, 1024 -; RV64IV-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; RV64IV-NEXT: vsetvli zero, a0, e8, m8, tu, ma ; RV64IV-NEXT: vmacc.vv v8, v16, v24 ; RV64IV-NEXT: ret %ret = call @llvm.riscv.vmacc.nxv64i8.nxv64i8( diff --git a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll @@ -49,7 +49,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV32-NEXT: vsext.vf2 v8, v9 ; RV32-NEXT: ret ; @@ -57,7 +57,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t -; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV64-NEXT: vsext.vf2 v8, v10 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv2i8.nxv2p0i8( %ptrs, i32 1, %m, %passthru) @@ -70,7 +70,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV32-NEXT: vzext.vf2 v8, v9 ; RV32-NEXT: ret ; @@ -78,7 +78,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t -; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV64-NEXT: vzext.vf2 v8, v10 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv2i8.nxv2p0i8( %ptrs, i32 1, %m, %passthru) @@ -91,7 +91,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV32-NEXT: vsext.vf4 v8, v9 ; RV32-NEXT: ret ; @@ -99,7 +99,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t -; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64-NEXT: vsext.vf4 v8, v10 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv2i8.nxv2p0i8( %ptrs, i32 1, %m, %passthru) @@ -112,7 +112,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV32-NEXT: vzext.vf4 v8, v9 ; RV32-NEXT: ret ; @@ -120,7 +120,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t -; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64-NEXT: vzext.vf4 v8, v10 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv2i8.nxv2p0i8( %ptrs, i32 1, %m, %passthru) @@ -133,7 +133,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; RV32-NEXT: vsext.vf8 v10, v9 ; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret @@ -142,7 +142,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t -; RV64-NEXT: vsetvli zero, zero, e64, 
m2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; RV64-NEXT: vsext.vf8 v8, v10 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv2i8.nxv2p0i8( %ptrs, i32 1, %m, %passthru) @@ -155,7 +155,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; RV32-NEXT: vzext.vf8 v10, v9 ; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret @@ -164,7 +164,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t -; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; RV64-NEXT: vzext.vf8 v8, v10 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv2i8.nxv2p0i8( %ptrs, i32 1, %m, %passthru) @@ -195,14 +195,14 @@ define @mgather_truemask_nxv4i8( %ptrs, %passthru) { ; RV32-LABEL: mgather_truemask_nxv4i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; RV32-NEXT: vluxei32.v v10, (zero), v8 ; RV32-NEXT: vmv1r.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_truemask_nxv4i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; RV64-NEXT: vluxei64.v v12, (zero), v8 ; RV64-NEXT: vmv1r.v v8, v12 ; RV64-NEXT: ret @@ -249,7 +249,7 @@ define @mgather_baseidx_nxv8i8(i8* %base, %idxs, %m, %passthru) { ; RV32-LABEL: mgather_baseidx_nxv8i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (a0), v12, v0.t @@ -258,7 +258,7 @@ ; ; RV64-LABEL: mgather_baseidx_nxv8i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (a0), v16, v0.t @@ -314,7 +314,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV32-NEXT: vsext.vf2 v8, v9 ; RV32-NEXT: ret ; @@ -322,7 +322,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t -; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64-NEXT: vsext.vf2 v8, v10 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv2i16.nxv2p0i16( %ptrs, i32 2, %m, %passthru) @@ -335,7 +335,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV32-NEXT: vzext.vf2 v8, v9 ; RV32-NEXT: ret ; @@ -343,7 +343,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t -; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64-NEXT: vzext.vf2 v8, v10 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv2i16.nxv2p0i16( %ptrs, i32 2, %m, %passthru) @@ -356,7 +356,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, 
ma ; RV32-NEXT: vsext.vf4 v10, v9 ; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret @@ -365,7 +365,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t -; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; RV64-NEXT: vsext.vf4 v8, v10 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv2i16.nxv2p0i16( %ptrs, i32 2, %m, %passthru) @@ -378,7 +378,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; RV32-NEXT: vzext.vf4 v10, v9 ; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret @@ -387,7 +387,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t -; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; RV64-NEXT: vzext.vf4 v8, v10 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv2i16.nxv2p0i16( %ptrs, i32 2, %m, %passthru) @@ -418,14 +418,14 @@ define @mgather_truemask_nxv4i16( %ptrs, %passthru) { ; RV32-LABEL: mgather_truemask_nxv4i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; RV32-NEXT: vluxei32.v v10, (zero), v8 ; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_truemask_nxv4i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; RV64-NEXT: vluxei64.v v12, (zero), v8 ; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret @@ -472,7 +472,7 @@ define @mgather_baseidx_nxv8i8_nxv8i16(i16* %base, %idxs, %m, %passthru) { ; RV32-LABEL: mgather_baseidx_nxv8i8_nxv8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu @@ -482,7 +482,7 @@ ; ; RV64-LABEL: mgather_baseidx_nxv8i8_nxv8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu @@ -497,7 +497,7 @@ define @mgather_baseidx_sext_nxv8i8_nxv8i16(i16* %base, %idxs, %m, %passthru) { ; RV32-LABEL: mgather_baseidx_sext_nxv8i8_nxv8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu @@ -507,7 +507,7 @@ ; ; RV64-LABEL: mgather_baseidx_sext_nxv8i8_nxv8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu @@ -523,7 +523,7 @@ define @mgather_baseidx_zext_nxv8i8_nxv8i16(i16* %base, %idxs, %m, %passthru) { ; RV32-LABEL: mgather_baseidx_zext_nxv8i8_nxv8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf4 v12, v8 ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu @@ -533,7 +533,7 @@ ; ; RV64-LABEL: mgather_baseidx_zext_nxv8i8_nxv8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, 
e64, m8, ta, ma ; RV64-NEXT: vzext.vf8 v16, v8 ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu @@ -549,7 +549,7 @@ define @mgather_baseidx_nxv8i16(i16* %base, %idxs, %m, %passthru) { ; RV32-LABEL: mgather_baseidx_nxv8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v12, v8 ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu @@ -559,7 +559,7 @@ ; ; RV64-LABEL: mgather_baseidx_nxv8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v8 ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu @@ -616,7 +616,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; RV32-NEXT: vsext.vf2 v10, v9 ; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret @@ -625,7 +625,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t -; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; RV64-NEXT: vsext.vf2 v8, v10 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv2i32.nxv2p0i32( %ptrs, i32 4, %m, %passthru) @@ -638,7 +638,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; RV32-NEXT: vzext.vf2 v10, v9 ; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret @@ -647,7 +647,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t -; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; RV64-NEXT: vzext.vf2 v8, v10 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv2i32.nxv2p0i32( %ptrs, i32 4, %m, %passthru) @@ -678,13 +678,13 @@ define @mgather_truemask_nxv4i32( %ptrs, %passthru) { ; RV32-LABEL: mgather_truemask_nxv4i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; RV32-NEXT: vluxei32.v v8, (zero), v8 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_truemask_nxv4i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; RV64-NEXT: vluxei64.v v12, (zero), v8 ; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret @@ -740,7 +740,7 @@ ; ; RV64-LABEL: mgather_baseidx_nxv8i8_nxv8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu @@ -764,7 +764,7 @@ ; ; RV64-LABEL: mgather_baseidx_sext_nxv8i8_nxv8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu @@ -789,7 +789,7 @@ ; ; RV64-LABEL: mgather_baseidx_zext_nxv8i8_nxv8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu @@ -814,7 +814,7 @@ ; ; RV64-LABEL: 
mgather_baseidx_nxv8i16_nxv8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu @@ -838,7 +838,7 @@ ; ; RV64-LABEL: mgather_baseidx_sext_nxv8i16_nxv8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu @@ -863,7 +863,7 @@ ; ; RV64-LABEL: mgather_baseidx_zext_nxv8i16_nxv8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu @@ -887,7 +887,7 @@ ; ; RV64-LABEL: mgather_baseidx_nxv8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf2 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu @@ -962,14 +962,14 @@ define @mgather_truemask_nxv4i64( %ptrs, %passthru) { ; RV32-LABEL: mgather_truemask_nxv4i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV32-NEXT: vluxei32.v v12, (zero), v8 ; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_truemask_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV64-NEXT: vluxei64.v v8, (zero), v8 ; RV64-NEXT: ret %mhead = insertelement poison, i1 1, i32 0 @@ -1010,7 +1010,7 @@ define @mgather_baseidx_nxv8i8_nxv8i64(i64* %base, %idxs, %m, %passthru) { ; RV32-LABEL: mgather_baseidx_nxv8i8_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu @@ -1034,7 +1034,7 @@ define @mgather_baseidx_sext_nxv8i8_nxv8i64(i64* %base, %idxs, %m, %passthru) { ; RV32-LABEL: mgather_baseidx_sext_nxv8i8_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu @@ -1059,7 +1059,7 @@ define @mgather_baseidx_zext_nxv8i8_nxv8i64(i64* %base, %idxs, %m, %passthru) { ; RV32-LABEL: mgather_baseidx_zext_nxv8i8_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu @@ -1084,7 +1084,7 @@ define @mgather_baseidx_nxv8i16_nxv8i64(i64* %base, %idxs, %m, %passthru) { ; RV32-LABEL: mgather_baseidx_nxv8i16_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu @@ -1108,7 +1108,7 @@ define @mgather_baseidx_sext_nxv8i16_nxv8i64(i64* %base, %idxs, %m, %passthru) { ; RV32-LABEL: mgather_baseidx_sext_nxv8i16_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 3 ; RV32-NEXT: 
vsetvli zero, zero, e64, m8, ta, mu @@ -1133,7 +1133,7 @@ define @mgather_baseidx_zext_nxv8i16_nxv8i64(i64* %base, %idxs, %m, %passthru) { ; RV32-LABEL: mgather_baseidx_zext_nxv8i16_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu @@ -1158,7 +1158,7 @@ define @mgather_baseidx_nxv8i32_nxv8i64(i64* %base, %idxs, %m, %passthru) { ; RV32-LABEL: mgather_baseidx_nxv8i32_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v8, v8, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t @@ -1181,7 +1181,7 @@ define @mgather_baseidx_sext_nxv8i32_nxv8i64(i64* %base, %idxs, %m, %passthru) { ; RV32-LABEL: mgather_baseidx_sext_nxv8i32_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v8, v8, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t @@ -1205,7 +1205,7 @@ define @mgather_baseidx_zext_nxv8i32_nxv8i64(i64* %base, %idxs, %m, %passthru) { ; RV32-LABEL: mgather_baseidx_zext_nxv8i32_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v8, v8, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t @@ -1229,7 +1229,7 @@ define @mgather_baseidx_nxv8i64(i64* %base, %idxs, %m, %passthru) { ; RV32-LABEL: mgather_baseidx_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vnsrl.wi v24, v8, 0 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu @@ -1262,7 +1262,7 @@ ; RV32-NEXT: vluxei32.v v16, (zero), v8, v0.t ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: srli a2, a0, 3 -; RV32-NEXT: vsetvli a3, zero, e8, mf4, ta, mu +; RV32-NEXT: vsetvli a3, zero, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vx v0, v0, a2 ; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v24, (zero), v12, v0.t @@ -1288,7 +1288,7 @@ ; RV64-NEXT: vluxei64.v v24, (zero), v16, v0.t ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: srli a1, a0, 3 -; RV64-NEXT: vsetvli a3, zero, e8, mf4, ta, mu +; RV64-NEXT: vsetvli a3, zero, e8, mf4, ta, ma ; RV64-NEXT: vslidedown.vx v0, v0, a1 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: addi a1, sp, 16 @@ -1378,14 +1378,14 @@ define @mgather_truemask_nxv4f16( %ptrs, %passthru) { ; RV32-LABEL: mgather_truemask_nxv4f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; RV32-NEXT: vluxei32.v v10, (zero), v8 ; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_truemask_nxv4f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; RV64-NEXT: vluxei64.v v12, (zero), v8 ; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret @@ -1432,7 +1432,7 @@ define @mgather_baseidx_nxv8i8_nxv8f16(half* %base, %idxs, %m, %passthru) { ; RV32-LABEL: mgather_baseidx_nxv8i8_nxv8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu 
@@ -1442,7 +1442,7 @@ ; ; RV64-LABEL: mgather_baseidx_nxv8i8_nxv8f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu @@ -1457,7 +1457,7 @@ define @mgather_baseidx_sext_nxv8i8_nxv8f16(half* %base, %idxs, %m, %passthru) { ; RV32-LABEL: mgather_baseidx_sext_nxv8i8_nxv8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu @@ -1467,7 +1467,7 @@ ; ; RV64-LABEL: mgather_baseidx_sext_nxv8i8_nxv8f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu @@ -1483,7 +1483,7 @@ define @mgather_baseidx_zext_nxv8i8_nxv8f16(half* %base, %idxs, %m, %passthru) { ; RV32-LABEL: mgather_baseidx_zext_nxv8i8_nxv8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf4 v12, v8 ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu @@ -1493,7 +1493,7 @@ ; ; RV64-LABEL: mgather_baseidx_zext_nxv8i8_nxv8f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf8 v16, v8 ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu @@ -1509,7 +1509,7 @@ define @mgather_baseidx_nxv8f16(half* %base, %idxs, %m, %passthru) { ; RV32-LABEL: mgather_baseidx_nxv8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v12, v8 ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu @@ -1519,7 +1519,7 @@ ; ; RV64-LABEL: mgather_baseidx_nxv8f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v8 ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu @@ -1594,13 +1594,13 @@ define @mgather_truemask_nxv4f32( %ptrs, %passthru) { ; RV32-LABEL: mgather_truemask_nxv4f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; RV32-NEXT: vluxei32.v v8, (zero), v8 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_truemask_nxv4f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; RV64-NEXT: vluxei64.v v12, (zero), v8 ; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret @@ -1656,7 +1656,7 @@ ; ; RV64-LABEL: mgather_baseidx_nxv8i8_nxv8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu @@ -1680,7 +1680,7 @@ ; ; RV64-LABEL: mgather_baseidx_sext_nxv8i8_nxv8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu @@ -1705,7 +1705,7 @@ ; ; RV64-LABEL: mgather_baseidx_zext_nxv8i8_nxv8f32: ; RV64: # %bb.0: -; RV64-NEXT: 
vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu @@ -1730,7 +1730,7 @@ ; ; RV64-LABEL: mgather_baseidx_nxv8i16_nxv8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu @@ -1754,7 +1754,7 @@ ; ; RV64-LABEL: mgather_baseidx_sext_nxv8i16_nxv8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu @@ -1779,7 +1779,7 @@ ; ; RV64-LABEL: mgather_baseidx_zext_nxv8i16_nxv8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu @@ -1803,7 +1803,7 @@ ; ; RV64-LABEL: mgather_baseidx_nxv8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf2 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu @@ -1878,14 +1878,14 @@ define @mgather_truemask_nxv4f64( %ptrs, %passthru) { ; RV32-LABEL: mgather_truemask_nxv4f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV32-NEXT: vluxei32.v v12, (zero), v8 ; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_truemask_nxv4f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV64-NEXT: vluxei64.v v8, (zero), v8 ; RV64-NEXT: ret %mhead = insertelement poison, i1 1, i32 0 @@ -1926,7 +1926,7 @@ define @mgather_baseidx_nxv8i8_nxv8f64(double* %base, %idxs, %m, %passthru) { ; RV32-LABEL: mgather_baseidx_nxv8i8_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu @@ -1950,7 +1950,7 @@ define @mgather_baseidx_sext_nxv8i8_nxv8f64(double* %base, %idxs, %m, %passthru) { ; RV32-LABEL: mgather_baseidx_sext_nxv8i8_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu @@ -1975,7 +1975,7 @@ define @mgather_baseidx_zext_nxv8i8_nxv8f64(double* %base, %idxs, %m, %passthru) { ; RV32-LABEL: mgather_baseidx_zext_nxv8i8_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu @@ -2000,7 +2000,7 @@ define @mgather_baseidx_nxv8i16_nxv8f64(double* %base, %idxs, %m, %passthru) { ; RV32-LABEL: mgather_baseidx_nxv8i16_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu @@ -2024,7 +2024,7 @@ define @mgather_baseidx_sext_nxv8i16_nxv8f64(double* %base, %idxs, %m, 
%passthru) { ; RV32-LABEL: mgather_baseidx_sext_nxv8i16_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu @@ -2049,7 +2049,7 @@ define @mgather_baseidx_zext_nxv8i16_nxv8f64(double* %base, %idxs, %m, %passthru) { ; RV32-LABEL: mgather_baseidx_zext_nxv8i16_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu @@ -2074,7 +2074,7 @@ define @mgather_baseidx_nxv8i32_nxv8f64(double* %base, %idxs, %m, %passthru) { ; RV32-LABEL: mgather_baseidx_nxv8i32_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v8, v8, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t @@ -2097,7 +2097,7 @@ define @mgather_baseidx_sext_nxv8i32_nxv8f64(double* %base, %idxs, %m, %passthru) { ; RV32-LABEL: mgather_baseidx_sext_nxv8i32_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v8, v8, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t @@ -2121,7 +2121,7 @@ define @mgather_baseidx_zext_nxv8i32_nxv8f64(double* %base, %idxs, %m, %passthru) { ; RV32-LABEL: mgather_baseidx_zext_nxv8i32_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v8, v8, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t @@ -2145,7 +2145,7 @@ define @mgather_baseidx_nxv8f64(double* %base, %idxs, %m, %passthru) { ; RV32-LABEL: mgather_baseidx_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vnsrl.wi v24, v8, 0 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu @@ -2170,7 +2170,7 @@ define @mgather_baseidx_nxv16i8(i8* %base, %idxs, %m, %passthru) { ; RV32-LABEL: mgather_baseidx_nxv16i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; RV32-NEXT: vsext.vf4 v16, v8 ; RV32-NEXT: vsetvli zero, zero, e8, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (a0), v16, v0.t @@ -2179,15 +2179,15 @@ ; ; RV64-LABEL: mgather_baseidx_nxv16i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: srli a1, a1, 3 -; RV64-NEXT: vsetvli a2, zero, e8, mf4, ta, mu +; RV64-NEXT: vsetvli a2, zero, e8, mf4, ta, ma ; RV64-NEXT: vslidedown.vx v0, v0, a1 -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v9 ; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV64-NEXT: vluxei64.v v11, (a0), v16, v0.t @@ -2203,15 +2203,15 @@ define @mgather_baseidx_nxv32i8(i8* %base, %idxs, %m, %passthru) { ; RV32-LABEL: mgather_baseidx_nxv32i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; RV32-NEXT: vsext.vf4 v16, 
v8 ; RV32-NEXT: vsetvli zero, zero, e8, m2, ta, mu ; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: srli a1, a1, 2 -; RV32-NEXT: vsetvli a2, zero, e8, mf2, ta, mu +; RV32-NEXT: vsetvli a2, zero, e8, mf2, ta, ma ; RV32-NEXT: vslidedown.vx v0, v0, a1 -; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; RV32-NEXT: vsext.vf4 v16, v10 ; RV32-NEXT: vsetvli zero, zero, e8, m2, ta, mu ; RV32-NEXT: vluxei32.v v14, (a0), v16, v0.t @@ -2221,28 +2221,28 @@ ; RV64-LABEL: mgather_baseidx_nxv32i8: ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v16, v0 -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v24, v8 ; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v24, v0.t ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: srli a2, a1, 3 -; RV64-NEXT: vsetvli a3, zero, e8, mf4, ta, mu +; RV64-NEXT: vsetvli a3, zero, e8, mf4, ta, ma ; RV64-NEXT: vslidedown.vx v0, v0, a2 -; RV64-NEXT: vsetvli a3, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a3, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v24, v9 ; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV64-NEXT: vluxei64.v v13, (a0), v24, v0.t ; RV64-NEXT: srli a1, a1, 2 -; RV64-NEXT: vsetvli a3, zero, e8, mf2, ta, mu +; RV64-NEXT: vsetvli a3, zero, e8, mf2, ta, ma ; RV64-NEXT: vslidedown.vx v0, v16, a1 -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v10 ; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV64-NEXT: vluxei64.v v14, (a0), v16, v0.t -; RV64-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; RV64-NEXT: vslidedown.vx v0, v0, a2 -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v11 ; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV64-NEXT: vluxei64.v v15, (a0), v16, v0.t diff --git a/llvm/test/CodeGen/RISCV/rvv/mscatter-combine.ll b/llvm/test/CodeGen/RISCV/rvv/mscatter-combine.ll --- a/llvm/test/CodeGen/RISCV/rvv/mscatter-combine.ll +++ b/llvm/test/CodeGen/RISCV/rvv/mscatter-combine.ll @@ -9,7 +9,7 @@ define void @complex_gep(ptr %p, %vec.ind, %m) { ; RV32-LABEL: complex_gep: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; RV32-NEXT: vnsrl.wi v10, v8, 0 ; RV32-NEXT: li a1, 48 ; RV32-NEXT: vmul.vx v8, v10, a1 @@ -21,10 +21,10 @@ ; RV64-LABEL: complex_gep: ; RV64: # %bb.0: ; RV64-NEXT: li a1, 56 -; RV64-NEXT: vsetvli a2, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m2, ta, ma ; RV64-NEXT: vmul.vx v8, v8, a1 ; RV64-NEXT: addi a0, a0, 32 -; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; RV64-NEXT: vmv.v.i v10, 0 ; RV64-NEXT: vsoxei64.v v10, (a0), v8, v0.t ; RV64-NEXT: ret @@ -36,14 +36,14 @@ define void @strided_store_zero_start(i64 %n, ptr %p) { ; RV32-LABEL: strided_store_zero_start: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV32-NEXT: vid.v v8 -; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: li a0, 48 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: addi a0, a2, 32 -; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV32-NEXT: vmv.v.i v9, 0 ; RV32-NEXT: vsoxei32.v v9, 
(a0), v8 ; RV32-NEXT: ret @@ -51,7 +51,7 @@ ; RV64-LABEL: strided_store_zero_start: ; RV64: # %bb.0: ; RV64-NEXT: addi a0, a1, 36 -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV64-NEXT: vmv.v.i v8, 0 ; RV64-NEXT: li a1, 56 ; RV64-NEXT: vsse64.v v8, (a0), a1 @@ -67,19 +67,19 @@ ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 -; RV32-NEXT: vsetvli a3, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a3, zero, e64, m1, ta, ma ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v8, (a0), zero ; RV32-NEXT: vid.v v9 ; RV32-NEXT: vadd.vv v8, v9, v8 -; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: li a0, 48 ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: addi a0, a2, 32 -; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV32-NEXT: vmv.v.i v9, 0 ; RV32-NEXT: vsoxei32.v v9, (a0), v8 ; RV32-NEXT: addi sp, sp, 16 @@ -91,7 +91,7 @@ ; RV64-NEXT: mul a0, a0, a2 ; RV64-NEXT: add a0, a1, a0 ; RV64-NEXT: addi a0, a0, 36 -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV64-NEXT: vmv.v.i v8, 0 ; RV64-NEXT: vsse64.v v8, (a0), a2 ; RV64-NEXT: ret @@ -107,19 +107,19 @@ define void @stride_one_store(i64 %n, ptr %p) { ; RV32-LABEL: stride_one_store: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV32-NEXT: vid.v v8 -; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; RV32-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsll.vi v8, v8, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV32-NEXT: vmv.v.i v9, 0 ; RV32-NEXT: vsoxei32.v v9, (a2), v8 ; RV32-NEXT: ret ; ; RV64-LABEL: stride_one_store: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV64-NEXT: vmv.v.i v8, 0 ; RV64-NEXT: li a0, 8 ; RV64-NEXT: vsse64.v v8, (a1), a0 diff --git a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll @@ -9,13 +9,13 @@ define void @mscatter_nxv1i8( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv1i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; RV32-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv1i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; RV64-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv1i8.nxv1p0i8( %val, %ptrs, i32 1, %m) @@ -27,13 +27,13 @@ define void @mscatter_nxv2i8( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv2i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv2i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv2i8.nxv2p0i8( %val, %ptrs, i32 1, %m) @@ -43,14 +43,14 @@ define void 
@mscatter_nxv2i16_truncstore_nxv2i8( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv2i16_truncstore_nxv2i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv2i16_truncstore_nxv2i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret @@ -62,18 +62,18 @@ define void @mscatter_nxv2i32_truncstore_nxv2i8( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv2i32_truncstore_nxv2i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; RV32-NEXT: vnsrl.wi v8, v8, 0 -; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv2i32_truncstore_nxv2i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; RV64-NEXT: vnsrl.wi v8, v8, 0 -; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret @@ -85,22 +85,22 @@ define void @mscatter_nxv2i64_truncstore_nxv2i8( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv2i64_truncstore_nxv2i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV32-NEXT: vnsrl.wi v11, v8, 0 -; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV32-NEXT: vnsrl.wi v8, v11, 0 -; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv2i64_truncstore_nxv2i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV64-NEXT: vnsrl.wi v12, v8, 0 -; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV64-NEXT: vnsrl.wi v8, v12, 0 -; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret @@ -114,13 +114,13 @@ define void @mscatter_nxv4i8( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv4i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv4i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv4i8.nxv4p0i8( %val, %ptrs, i32 1, %m) @@ -130,13 +130,13 @@ define void @mscatter_truemask_nxv4i8( %val, %ptrs) { ; RV32-LABEL: mscatter_truemask_nxv4i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_truemask_nxv4i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; RV64-NEXT: 
vsetvli a0, zero, e8, mf2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12 ; RV64-NEXT: ret %mhead = insertelement poison, i1 1, i32 0 @@ -158,13 +158,13 @@ define void @mscatter_nxv8i8( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv8i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv8i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv8i8.nxv8p0i8( %val, %ptrs, i32 1, %m) @@ -174,17 +174,17 @@ define void @mscatter_baseidx_nxv8i8( %val, i8* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_nxv8i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v9 -; RV32-NEXT: vsetvli zero, zero, e8, m1, ta, mu +; RV32-NEXT: vsetvli zero, zero, e8, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_nxv8i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v9 -; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu +; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds i8, i8* %base, %idxs @@ -197,13 +197,13 @@ define void @mscatter_nxv1i16( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv1i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv1i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv1i16.nxv1p0i16( %val, %ptrs, i32 2, %m) @@ -215,13 +215,13 @@ define void @mscatter_nxv2i16( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv2i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv2i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv2i16.nxv2p0i16( %val, %ptrs, i32 2, %m) @@ -231,14 +231,14 @@ define void @mscatter_nxv2i32_truncstore_nxv2i16( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv2i32_truncstore_nxv2i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; RV32-NEXT: vnsrl.wi v8, v8, 0 ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv2i32_truncstore_nxv2i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; RV64-NEXT: vnsrl.wi v8, v8, 0 ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret @@ -250,18 +250,18 @@ define void @mscatter_nxv2i64_truncstore_nxv2i16( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv2i64_truncstore_nxv2i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV32-NEXT: vnsrl.wi v11, v8, 0 -; 
RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV32-NEXT: vnsrl.wi v8, v11, 0 ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv2i64_truncstore_nxv2i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV64-NEXT: vnsrl.wi v12, v8, 0 -; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV64-NEXT: vnsrl.wi v8, v12, 0 ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret @@ -275,13 +275,13 @@ define void @mscatter_nxv4i16( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv4i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv4i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv4i16.nxv4p0i16( %val, %ptrs, i32 2, %m) @@ -291,13 +291,13 @@ define void @mscatter_truemask_nxv4i16( %val, %ptrs) { ; RV32-LABEL: mscatter_truemask_nxv4i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_truemask_nxv4i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12 ; RV64-NEXT: ret %mhead = insertelement poison, i1 1, i32 0 @@ -319,13 +319,13 @@ define void @mscatter_nxv8i16( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv8i16.nxv8p0i16( %val, %ptrs, i32 2, %m) @@ -335,19 +335,19 @@ define void @mscatter_baseidx_nxv8i8_nxv8i16( %val, i16* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_nxv8i8_nxv8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v10 ; RV32-NEXT: vadd.vv v12, v12, v12 -; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_nxv8i8_nxv8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v10 ; RV64-NEXT: vadd.vv v16, v16, v16 -; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds i16, i16* %base, %idxs @@ -358,19 +358,19 @@ define void @mscatter_baseidx_sext_nxv8i8_nxv8i16( %val, i16* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_sext_nxv8i8_nxv8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v10 ; RV32-NEXT: vadd.vv v12, v12, v12 -; RV32-NEXT: vsetvli 
zero, zero, e16, m2, ta, mu +; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_nxv8i8_nxv8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v10 ; RV64-NEXT: vadd.vv v16, v16, v16 -; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = sext %idxs to @@ -382,19 +382,19 @@ define void @mscatter_baseidx_zext_nxv8i8_nxv8i16( %val, i16* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf4 v12, v10 ; RV32-NEXT: vadd.vv v12, v12, v12 -; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf8 v16, v10 ; RV64-NEXT: vadd.vv v16, v16, v16 -; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = zext %idxs to @@ -406,19 +406,19 @@ define void @mscatter_baseidx_nxv8i16( %val, i16* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_nxv8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v12, v10 ; RV32-NEXT: vadd.vv v12, v12, v12 -; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_nxv8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v10 ; RV64-NEXT: vadd.vv v16, v16, v16 -; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds i16, i16* %base, %idxs @@ -431,13 +431,13 @@ define void @mscatter_nxv1i32( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv1i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv1i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv1i32.nxv1p0i32( %val, %ptrs, i32 4, %m) @@ -449,13 +449,13 @@ define void @mscatter_nxv2i32( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv2i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv2i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv2i32.nxv2p0i32( %val, %ptrs, i32 4, %m) @@ -465,14 +465,14 @@ define void 
@mscatter_nxv2i64_truncstore_nxv2i32( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv2i64_truncstore_nxv2i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV32-NEXT: vnsrl.wi v11, v8, 0 ; RV32-NEXT: vsoxei32.v v11, (zero), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv2i64_truncstore_nxv2i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV64-NEXT: vnsrl.wi v12, v8, 0 ; RV64-NEXT: vsoxei64.v v12, (zero), v10, v0.t ; RV64-NEXT: ret @@ -486,13 +486,13 @@ define void @mscatter_nxv4i32( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv4i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv4i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv4i32.nxv4p0i32( %val, %ptrs, i32 4, %m) @@ -502,13 +502,13 @@ define void @mscatter_truemask_nxv4i32( %val, %ptrs) { ; RV32-LABEL: mscatter_truemask_nxv4i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_truemask_nxv4i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12 ; RV64-NEXT: ret %mhead = insertelement poison, i1 1, i32 0 @@ -530,13 +530,13 @@ define void @mscatter_nxv8i32( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv8i32.nxv8p0i32( %val, %ptrs, i32 4, %m) @@ -546,7 +546,7 @@ define void @mscatter_baseidx_nxv8i8_nxv8i32( %val, i32* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_nxv8i8_nxv8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t @@ -554,10 +554,10 @@ ; ; RV64-LABEL: mscatter_baseidx_nxv8i8_nxv8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v12 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, %idxs @@ -568,7 +568,7 @@ define void @mscatter_baseidx_sext_nxv8i8_nxv8i32( %val, i32* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_sext_nxv8i8_nxv8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t @@ -576,10 +576,10 @@ ; ; RV64-LABEL: mscatter_baseidx_sext_nxv8i8_nxv8i32: ; RV64: # %bb.0: -; 
RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v12 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = sext %idxs to @@ -591,7 +591,7 @@ define void @mscatter_baseidx_zext_nxv8i8_nxv8i32( %val, i32* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf4 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t @@ -599,10 +599,10 @@ ; ; RV64-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf8 v16, v12 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = zext %idxs to @@ -614,7 +614,7 @@ define void @mscatter_baseidx_nxv8i16_nxv8i32( %val, i32* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_nxv8i16_nxv8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t @@ -622,10 +622,10 @@ ; ; RV64-LABEL: mscatter_baseidx_nxv8i16_nxv8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v12 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, %idxs @@ -636,7 +636,7 @@ define void @mscatter_baseidx_sext_nxv8i16_nxv8i32( %val, i32* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t @@ -644,10 +644,10 @@ ; ; RV64-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v12 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = sext %idxs to @@ -659,7 +659,7 @@ define void @mscatter_baseidx_zext_nxv8i16_nxv8i32( %val, i32* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_zext_nxv8i16_nxv8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf2 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t @@ -667,10 +667,10 @@ ; ; RV64-LABEL: mscatter_baseidx_zext_nxv8i16_nxv8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf4 v16, v12 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; RV64-NEXT: 
vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = zext %idxs to @@ -682,17 +682,17 @@ define void @mscatter_baseidx_nxv8i32( %val, i32* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_nxv8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v12, v12, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_nxv8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf2 v16, v12 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, %idxs @@ -705,13 +705,13 @@ define void @mscatter_nxv1i64( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv1i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv1i64.nxv1p0i64( %val, %ptrs, i32 8, %m) @@ -723,13 +723,13 @@ define void @mscatter_nxv2i64( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv2i64.nxv2p0i64( %val, %ptrs, i32 8, %m) @@ -741,13 +741,13 @@ define void @mscatter_nxv4i64( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv4i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv4i64.nxv4p0i64( %val, %ptrs, i32 8, %m) @@ -757,13 +757,13 @@ define void @mscatter_truemask_nxv4i64( %val, %ptrs) { ; RV32-LABEL: mscatter_truemask_nxv4i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_truemask_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12 ; RV64-NEXT: ret %mhead = insertelement poison, i1 1, i32 0 @@ -785,13 +785,13 @@ define void @mscatter_nxv8i64( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t ; 
RV64-NEXT: ret call void @llvm.masked.scatter.nxv8i64.nxv8p0i64( %val, %ptrs, i32 8, %m) @@ -801,16 +801,16 @@ define void @mscatter_baseidx_nxv8i8_nxv8i64( %val, i64* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_nxv8i8_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v20, v16 ; RV32-NEXT: vsll.vi v16, v20, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_nxv8i8_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t @@ -823,16 +823,16 @@ define void @mscatter_baseidx_sext_nxv8i8_nxv8i64( %val, i64* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_sext_nxv8i8_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v20, v16 ; RV32-NEXT: vsll.vi v16, v20, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_nxv8i8_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t @@ -846,16 +846,16 @@ define void @mscatter_baseidx_zext_nxv8i8_nxv8i64( %val, i64* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf4 v20, v16 ; RV32-NEXT: vsll.vi v16, v20, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf8 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t @@ -869,16 +869,16 @@ define void @mscatter_baseidx_nxv8i16_nxv8i64( %val, i64* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_nxv8i16_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v20, v16 ; RV32-NEXT: vsll.vi v16, v20, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_nxv8i16_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t @@ -891,16 +891,16 @@ define void @mscatter_baseidx_sext_nxv8i16_nxv8i64( %val, i64* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v20, v16 ; RV32-NEXT: vsll.vi v16, v20, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli 
zero, zero, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t @@ -914,16 +914,16 @@ define void @mscatter_baseidx_zext_nxv8i16_nxv8i64( %val, i64* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_zext_nxv8i16_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf2 v20, v16 ; RV32-NEXT: vsll.vi v16, v20, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_zext_nxv8i16_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf4 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t @@ -937,15 +937,15 @@ define void @mscatter_baseidx_nxv8i32_nxv8i64( %val, i64* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_nxv8i32_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v16, v16, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_nxv8i32_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf2 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t @@ -958,15 +958,15 @@ define void @mscatter_baseidx_sext_nxv8i32_nxv8i64( %val, i64* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_sext_nxv8i32_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v16, v16, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_nxv8i32_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf2 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t @@ -980,15 +980,15 @@ define void @mscatter_baseidx_zext_nxv8i32_nxv8i64( %val, i64* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_zext_nxv8i32_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v16, v16, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_zext_nxv8i32_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf2 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t @@ -1002,16 +1002,16 @@ define void @mscatter_baseidx_nxv8i64( %val, i64* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; 
RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vnsrl.wi v24, v16, 0 ; RV32-NEXT: vsll.vi v16, v24, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsll.vi v16, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret @@ -1025,13 +1025,13 @@ define void @mscatter_nxv1f16( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv1f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv1f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv1f16.nxv1p0f16( %val, %ptrs, i32 2, %m) @@ -1043,13 +1043,13 @@ define void @mscatter_nxv2f16( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv2f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv2f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv2f16.nxv2p0f16( %val, %ptrs, i32 2, %m) @@ -1061,13 +1061,13 @@ define void @mscatter_nxv4f16( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv4f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv4f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv4f16.nxv4p0f16( %val, %ptrs, i32 2, %m) @@ -1077,13 +1077,13 @@ define void @mscatter_truemask_nxv4f16( %val, %ptrs) { ; RV32-LABEL: mscatter_truemask_nxv4f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_truemask_nxv4f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12 ; RV64-NEXT: ret %mhead = insertelement poison, i1 1, i32 0 @@ -1105,13 +1105,13 @@ define void @mscatter_nxv8f16( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv8f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv8f16.nxv8p0f16( %val, %ptrs, i32 2, %m) @@ -1121,19 +1121,19 @@ define void @mscatter_baseidx_nxv8i8_nxv8f16( %val, half* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_nxv8i8_nxv8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; 
RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v10 ; RV32-NEXT: vadd.vv v12, v12, v12 -; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_nxv8i8_nxv8f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v10 ; RV64-NEXT: vadd.vv v16, v16, v16 -; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds half, half* %base, %idxs @@ -1144,19 +1144,19 @@ define void @mscatter_baseidx_sext_nxv8i8_nxv8f16( %val, half* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_sext_nxv8i8_nxv8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v10 ; RV32-NEXT: vadd.vv v12, v12, v12 -; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_nxv8i8_nxv8f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v10 ; RV64-NEXT: vadd.vv v16, v16, v16 -; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = sext %idxs to @@ -1168,19 +1168,19 @@ define void @mscatter_baseidx_zext_nxv8i8_nxv8f16( %val, half* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf4 v12, v10 ; RV32-NEXT: vadd.vv v12, v12, v12 -; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf8 v16, v10 ; RV64-NEXT: vadd.vv v16, v16, v16 -; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = zext %idxs to @@ -1192,19 +1192,19 @@ define void @mscatter_baseidx_nxv8f16( %val, half* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_nxv8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v12, v10 ; RV32-NEXT: vadd.vv v12, v12, v12 -; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_nxv8f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v10 ; RV64-NEXT: vadd.vv v16, v16, v16 -; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds half, half* %base, %idxs @@ -1217,13 +1217,13 @@ define void @mscatter_nxv1f32( %val, %ptrs, %m) { ; 
RV32-LABEL: mscatter_nxv1f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv1f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv1f32.nxv1p0f32( %val, %ptrs, i32 4, %m) @@ -1235,13 +1235,13 @@ define void @mscatter_nxv2f32( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv2f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv2f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv2f32.nxv2p0f32( %val, %ptrs, i32 4, %m) @@ -1253,13 +1253,13 @@ define void @mscatter_nxv4f32( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv4f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv4f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv4f32.nxv4p0f32( %val, %ptrs, i32 4, %m) @@ -1269,13 +1269,13 @@ define void @mscatter_truemask_nxv4f32( %val, %ptrs) { ; RV32-LABEL: mscatter_truemask_nxv4f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_truemask_nxv4f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12 ; RV64-NEXT: ret %mhead = insertelement poison, i1 1, i32 0 @@ -1297,13 +1297,13 @@ define void @mscatter_nxv8f32( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv8f32.nxv8p0f32( %val, %ptrs, i32 4, %m) @@ -1313,7 +1313,7 @@ define void @mscatter_baseidx_nxv8i8_nxv8f32( %val, float* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_nxv8i8_nxv8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t @@ -1321,10 +1321,10 @@ ; ; RV64-LABEL: mscatter_baseidx_nxv8i8_nxv8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v12 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, 
float* %base, %idxs @@ -1335,7 +1335,7 @@ define void @mscatter_baseidx_sext_nxv8i8_nxv8f32( %val, float* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_sext_nxv8i8_nxv8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t @@ -1343,10 +1343,10 @@ ; ; RV64-LABEL: mscatter_baseidx_sext_nxv8i8_nxv8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v12 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = sext %idxs to @@ -1358,7 +1358,7 @@ define void @mscatter_baseidx_zext_nxv8i8_nxv8f32( %val, float* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf4 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t @@ -1366,10 +1366,10 @@ ; ; RV64-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf8 v16, v12 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = zext %idxs to @@ -1381,7 +1381,7 @@ define void @mscatter_baseidx_nxv8i16_nxv8f32( %val, float* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_nxv8i16_nxv8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t @@ -1389,10 +1389,10 @@ ; ; RV64-LABEL: mscatter_baseidx_nxv8i16_nxv8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v12 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, %idxs @@ -1403,7 +1403,7 @@ define void @mscatter_baseidx_sext_nxv8i16_nxv8f32( %val, float* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t @@ -1411,10 +1411,10 @@ ; ; RV64-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v12 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = sext %idxs to @@ -1426,7 +1426,7 @@ define void @mscatter_baseidx_zext_nxv8i16_nxv8f32( %val, float* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_zext_nxv8i16_nxv8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; 
RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf2 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t @@ -1434,10 +1434,10 @@ ; ; RV64-LABEL: mscatter_baseidx_zext_nxv8i16_nxv8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf4 v16, v12 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = zext %idxs to @@ -1449,17 +1449,17 @@ define void @mscatter_baseidx_nxv8f32( %val, float* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_nxv8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v12, v12, 2 ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_nxv8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf2 v16, v12 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, %idxs @@ -1472,13 +1472,13 @@ define void @mscatter_nxv1f64( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv1f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv1f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv1f64.nxv1p0f64( %val, %ptrs, i32 8, %m) @@ -1490,13 +1490,13 @@ define void @mscatter_nxv2f64( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv2f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv2f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv2f64.nxv2p0f64( %val, %ptrs, i32 8, %m) @@ -1508,13 +1508,13 @@ define void @mscatter_nxv4f64( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv4f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv4f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv4f64.nxv4p0f64( %val, %ptrs, i32 8, %m) @@ -1524,13 +1524,13 @@ define void @mscatter_truemask_nxv4f64( %val, %ptrs) { ; RV32-LABEL: mscatter_truemask_nxv4f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_truemask_nxv4f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), 
v12 ; RV64-NEXT: ret %mhead = insertelement poison, i1 1, i32 0 @@ -1552,13 +1552,13 @@ define void @mscatter_nxv8f64( %val, %ptrs, %m) { ; RV32-LABEL: mscatter_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_nxv8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t ; RV64-NEXT: ret call void @llvm.masked.scatter.nxv8f64.nxv8p0f64( %val, %ptrs, i32 8, %m) @@ -1568,16 +1568,16 @@ define void @mscatter_baseidx_nxv8i8_nxv8f64( %val, double* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_nxv8i8_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v20, v16 ; RV32-NEXT: vsll.vi v16, v20, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_nxv8i8_nxv8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t @@ -1590,16 +1590,16 @@ define void @mscatter_baseidx_sext_nxv8i8_nxv8f64( %val, double* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_sext_nxv8i8_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v20, v16 ; RV32-NEXT: vsll.vi v16, v20, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_nxv8i8_nxv8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t @@ -1613,16 +1613,16 @@ define void @mscatter_baseidx_zext_nxv8i8_nxv8f64( %val, double* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf4 v20, v16 ; RV32-NEXT: vsll.vi v16, v20, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf8 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t @@ -1636,16 +1636,16 @@ define void @mscatter_baseidx_nxv8i16_nxv8f64( %val, double* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_nxv8i16_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v20, v16 ; RV32-NEXT: vsll.vi v16, v20, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_nxv8i16_nxv8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; 
RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t @@ -1658,16 +1658,16 @@ define void @mscatter_baseidx_sext_nxv8i16_nxv8f64( %val, double* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v20, v16 ; RV32-NEXT: vsll.vi v16, v20, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t @@ -1681,16 +1681,16 @@ define void @mscatter_baseidx_zext_nxv8i16_nxv8f64( %val, double* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_zext_nxv8i16_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf2 v20, v16 ; RV32-NEXT: vsll.vi v16, v20, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_zext_nxv8i16_nxv8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf4 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t @@ -1704,15 +1704,15 @@ define void @mscatter_baseidx_nxv8i32_nxv8f64( %val, double* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_nxv8i32_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v16, v16, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_nxv8i32_nxv8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf2 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t @@ -1725,15 +1725,15 @@ define void @mscatter_baseidx_sext_nxv8i32_nxv8f64( %val, double* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_sext_nxv8i32_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v16, v16, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_sext_nxv8i32_nxv8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf2 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t @@ -1747,15 +1747,15 @@ define void @mscatter_baseidx_zext_nxv8i32_nxv8f64( %val, double* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_zext_nxv8i32_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v16, v16, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; 
RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_zext_nxv8i32_nxv8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf2 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t @@ -1769,16 +1769,16 @@ define void @mscatter_baseidx_nxv8f64( %val, double* %base, %idxs, %m) { ; RV32-LABEL: mscatter_baseidx_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vnsrl.wi v24, v16, 0 ; RV32-NEXT: vsll.vi v16, v24, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_nxv8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsll.vi v16, v16, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret @@ -1797,13 +1797,13 @@ ; RV32: # %bb.0: ; RV32-NEXT: vl4re32.v v24, (a0) ; RV32-NEXT: vl4re32.v v28, (a1) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v24, v0.t ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: srli a0, a0, 3 -; RV32-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vx v0, v0, a0 -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v16, (zero), v28, v0.t ; RV32-NEXT: ret ; @@ -1818,13 +1818,13 @@ ; RV64-NEXT: addi a0, sp, 16 ; RV64-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill ; RV64-NEXT: vl8re64.v v16, (a1) -; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v24, v0.t ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: srli a0, a0, 3 -; RV64-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; RV64-NEXT: vslidedown.vx v0, v0, a0 -; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; RV64-NEXT: addi a0, sp, 16 ; RV64-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload ; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t @@ -1845,23 +1845,23 @@ ; RV32-LABEL: mscatter_baseidx_nxv16i8_nxv16f64: ; RV32: # %bb.0: ; RV32-NEXT: vl2r.v v2, (a1) -; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; RV32-NEXT: vsext.vf4 v24, v2 ; RV32-NEXT: vsll.vi v24, v24, 3 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: srli a1, a1, 3 -; RV32-NEXT: vsetvli a2, zero, e8, mf4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vx v0, v0, a1 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v16, (a0), v28, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_nxv16i8_nxv16f64: ; RV64: # %bb.0: ; RV64-NEXT: vl2r.v v2, (a1) -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v24, v2 ; RV64-NEXT: vsll.vi v24, v24, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t @@ -1869,9 +1869,9 @@ ; RV64-NEXT: vsll.vi v8, v8, 3 ; RV64-NEXT: 
csrr a1, vlenb ; RV64-NEXT: srli a1, a1, 3 -; RV64-NEXT: vsetvli a2, zero, e8, mf4, ta, mu +; RV64-NEXT: vsetvli a2, zero, e8, mf4, ta, ma ; RV64-NEXT: vslidedown.vx v0, v0, a1 -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, %idxs @@ -1885,23 +1885,23 @@ ; RV32-LABEL: mscatter_baseidx_nxv16i16_nxv16f64: ; RV32: # %bb.0: ; RV32-NEXT: vl4re16.v v4, (a1) -; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; RV32-NEXT: vsext.vf2 v24, v4 ; RV32-NEXT: vsll.vi v24, v24, 3 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: srli a1, a1, 3 -; RV32-NEXT: vsetvli a2, zero, e8, mf4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e8, mf4, ta, ma ; RV32-NEXT: vslidedown.vx v0, v0, a1 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v16, (a0), v28, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: mscatter_baseidx_nxv16i16_nxv16f64: ; RV64: # %bb.0: ; RV64-NEXT: vl4re16.v v4, (a1) -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v24, v4 ; RV64-NEXT: vsll.vi v24, v24, 3 ; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t @@ -1909,9 +1909,9 @@ ; RV64-NEXT: vsll.vi v8, v8, 3 ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: srli a1, a1, 3 -; RV64-NEXT: vsetvli a2, zero, e8, mf4, ta, mu +; RV64-NEXT: vsetvli a2, zero, e8, mf4, ta, ma ; RV64-NEXT: vslidedown.vx v0, v0, a1 -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, %idxs diff --git a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll --- a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll +++ b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll @@ -13,16 +13,16 @@ define @reverse_nxv2i1( %a) { ; RV32-BITS-UNKNOWN-LABEL: reverse_nxv2i1: ; RV32-BITS-UNKNOWN: # %bb.0: -; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0 ; RV32-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV32-BITS-UNKNOWN-NEXT: srli a0, a0, 2 ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 -; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vid.v v9 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v9, v9, a0 -; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v8, v9 ; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v10, 1 ; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 @@ -30,7 +30,7 @@ ; ; RV32-BITS-256-LABEL: reverse_nxv2i1: ; RV32-BITS-256: # %bb.0: -; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; RV32-BITS-256-NEXT: vmv.v.i v8, 0 ; RV32-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-256-NEXT: csrr a0, vlenb @@ -45,7 +45,7 @@ ; ; RV32-BITS-512-LABEL: reverse_nxv2i1: ; RV32-BITS-512: # %bb.0: -; 
RV32-BITS-512-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; RV32-BITS-512-NEXT: vmv.v.i v8, 0 ; RV32-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-512-NEXT: csrr a0, vlenb @@ -60,16 +60,16 @@ ; ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv2i1: ; RV64-BITS-UNKNOWN: # %bb.0: -; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0 ; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: srli a0, a0, 2 ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 -; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v9 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v9, v9, a0 -; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v8, v9 ; RV64-BITS-UNKNOWN-NEXT: vand.vi v8, v10, 1 ; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 @@ -77,7 +77,7 @@ ; ; RV64-BITS-256-LABEL: reverse_nxv2i1: ; RV64-BITS-256: # %bb.0: -; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; RV64-BITS-256-NEXT: vmv.v.i v8, 0 ; RV64-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-256-NEXT: csrr a0, vlenb @@ -92,7 +92,7 @@ ; ; RV64-BITS-512-LABEL: reverse_nxv2i1: ; RV64-BITS-512: # %bb.0: -; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; RV64-BITS-512-NEXT: vmv.v.i v8, 0 ; RV64-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-512-NEXT: csrr a0, vlenb @@ -111,16 +111,16 @@ define @reverse_nxv4i1( %a) { ; RV32-BITS-UNKNOWN-LABEL: reverse_nxv4i1: ; RV32-BITS-UNKNOWN: # %bb.0: -; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0 ; RV32-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV32-BITS-UNKNOWN-NEXT: srli a0, a0, 1 ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 -; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vid.v v9 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v9, v9, a0 -; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v8, v9 ; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v10, 1 ; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 @@ -128,7 +128,7 @@ ; ; RV32-BITS-256-LABEL: reverse_nxv4i1: ; RV32-BITS-256: # %bb.0: -; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; RV32-BITS-256-NEXT: vmv.v.i v8, 0 ; RV32-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-256-NEXT: csrr a0, vlenb @@ -143,7 +143,7 @@ ; ; RV32-BITS-512-LABEL: reverse_nxv4i1: ; RV32-BITS-512: # %bb.0: -; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; RV32-BITS-512-NEXT: vmv.v.i v8, 0 ; RV32-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-512-NEXT: csrr a0, vlenb @@ -158,16 +158,16 @@ ; ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv4i1: ; RV64-BITS-UNKNOWN: # %bb.0: -; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetvli 
a0, zero, e8, mf2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0 ; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: srli a0, a0, 1 ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 -; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v9 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v9, v9, a0 -; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v8, v9 ; RV64-BITS-UNKNOWN-NEXT: vand.vi v8, v10, 1 ; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 @@ -175,7 +175,7 @@ ; ; RV64-BITS-256-LABEL: reverse_nxv4i1: ; RV64-BITS-256: # %bb.0: -; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; RV64-BITS-256-NEXT: vmv.v.i v8, 0 ; RV64-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-256-NEXT: csrr a0, vlenb @@ -190,7 +190,7 @@ ; ; RV64-BITS-512-LABEL: reverse_nxv4i1: ; RV64-BITS-512: # %bb.0: -; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; RV64-BITS-512-NEXT: vmv.v.i v8, 0 ; RV64-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-512-NEXT: csrr a0, vlenb @@ -209,15 +209,15 @@ define @reverse_nxv8i1( %a) { ; RV32-BITS-UNKNOWN-LABEL: reverse_nxv8i1: ; RV32-BITS-UNKNOWN: # %bb.0: -; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0 ; RV32-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 -; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vid.v v10 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v10, v10, a0 -; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m1, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m1, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 ; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v9, 1 ; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 @@ -225,7 +225,7 @@ ; ; RV32-BITS-256-LABEL: reverse_nxv8i1: ; RV32-BITS-256: # %bb.0: -; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV32-BITS-256-NEXT: vmv.v.i v8, 0 ; RV32-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-256-NEXT: csrr a0, vlenb @@ -239,7 +239,7 @@ ; ; RV32-BITS-512-LABEL: reverse_nxv8i1: ; RV32-BITS-512: # %bb.0: -; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV32-BITS-512-NEXT: vmv.v.i v8, 0 ; RV32-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-512-NEXT: csrr a0, vlenb @@ -253,15 +253,15 @@ ; ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv8i1: ; RV64-BITS-UNKNOWN: # %bb.0: -; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0 ; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 -; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v10 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v10, v10, a0 -; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m1, ta, 
mu +; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m1, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 ; RV64-BITS-UNKNOWN-NEXT: vand.vi v8, v9, 1 ; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 @@ -269,7 +269,7 @@ ; ; RV64-BITS-256-LABEL: reverse_nxv8i1: ; RV64-BITS-256: # %bb.0: -; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV64-BITS-256-NEXT: vmv.v.i v8, 0 ; RV64-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-256-NEXT: csrr a0, vlenb @@ -283,7 +283,7 @@ ; ; RV64-BITS-512-LABEL: reverse_nxv8i1: ; RV64-BITS-512: # %bb.0: -; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; RV64-BITS-512-NEXT: vmv.v.i v8, 0 ; RV64-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-512-NEXT: csrr a0, vlenb @@ -301,16 +301,16 @@ define @reverse_nxv16i1( %a) { ; RV32-BITS-UNKNOWN-LABEL: reverse_nxv16i1: ; RV32-BITS-UNKNOWN: # %bb.0: -; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0 ; RV32-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV32-BITS-UNKNOWN-NEXT: slli a0, a0, 1 ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 -; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m4, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m4, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vid.v v12 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v12, v12, a0 -; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m2, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v8, v12 ; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v10, 1 ; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 @@ -318,7 +318,7 @@ ; ; RV32-BITS-256-LABEL: reverse_nxv16i1: ; RV32-BITS-256: # %bb.0: -; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; RV32-BITS-256-NEXT: vmv.v.i v8, 0 ; RV32-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-256-NEXT: csrr a0, vlenb @@ -333,7 +333,7 @@ ; ; RV32-BITS-512-LABEL: reverse_nxv16i1: ; RV32-BITS-512: # %bb.0: -; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; RV32-BITS-512-NEXT: vmv.v.i v8, 0 ; RV32-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-512-NEXT: csrr a0, vlenb @@ -348,16 +348,16 @@ ; ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv16i1: ; RV64-BITS-UNKNOWN: # %bb.0: -; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0 ; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: slli a0, a0, 1 ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 -; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m4, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m4, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v12 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v12, v12, a0 -; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m2, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v8, v12 ; RV64-BITS-UNKNOWN-NEXT: vand.vi v8, v10, 1 ; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 @@ -365,7 +365,7 @@ ; ; RV64-BITS-256-LABEL: reverse_nxv16i1: ; RV64-BITS-256: # %bb.0: -; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; 
RV64-BITS-256-NEXT: vmv.v.i v8, 0 ; RV64-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-256-NEXT: csrr a0, vlenb @@ -380,7 +380,7 @@ ; ; RV64-BITS-512-LABEL: reverse_nxv16i1: ; RV64-BITS-512: # %bb.0: -; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; RV64-BITS-512-NEXT: vmv.v.i v8, 0 ; RV64-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-512-NEXT: csrr a0, vlenb @@ -399,16 +399,16 @@ define @reverse_nxv32i1( %a) { ; RV32-BITS-UNKNOWN-LABEL: reverse_nxv32i1: ; RV32-BITS-UNKNOWN: # %bb.0: -; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0 ; RV32-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV32-BITS-UNKNOWN-NEXT: slli a0, a0, 2 ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 -; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m8, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m8, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vid.v v16 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v16, v16, a0 -; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m4, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m4, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v8, v16 ; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v12, 1 ; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 @@ -416,7 +416,7 @@ ; ; RV32-BITS-256-LABEL: reverse_nxv32i1: ; RV32-BITS-256: # %bb.0: -; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; RV32-BITS-256-NEXT: vmv.v.i v8, 0 ; RV32-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-256-NEXT: csrr a0, vlenb @@ -431,7 +431,7 @@ ; ; RV32-BITS-512-LABEL: reverse_nxv32i1: ; RV32-BITS-512: # %bb.0: -; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; RV32-BITS-512-NEXT: vmv.v.i v8, 0 ; RV32-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-512-NEXT: csrr a0, vlenb @@ -446,16 +446,16 @@ ; ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv32i1: ; RV64-BITS-UNKNOWN: # %bb.0: -; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vmv.v.i v8, 0 ; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: slli a0, a0, 2 ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 -; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m8, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e16, m8, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v16 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v16, v16, a0 -; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m4, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m4, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v8, v16 ; RV64-BITS-UNKNOWN-NEXT: vand.vi v8, v12, 1 ; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 @@ -463,7 +463,7 @@ ; ; RV64-BITS-256-LABEL: reverse_nxv32i1: ; RV64-BITS-256: # %bb.0: -; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; RV64-BITS-256-NEXT: vmv.v.i v8, 0 ; RV64-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-256-NEXT: csrr a0, vlenb @@ -478,7 +478,7 @@ ; ; RV64-BITS-512-LABEL: reverse_nxv32i1: ; RV64-BITS-512: # %bb.0: -; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; RV64-BITS-512-NEXT: vmv.v.i v8, 0 ; RV64-BITS-512-NEXT: vmerge.vim v8, v8, 1, v0 ; 
RV64-BITS-512-NEXT: csrr a0, vlenb @@ -500,23 +500,23 @@ ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV32-BITS-UNKNOWN-NEXT: slli a0, a0, 2 ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 -; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vid.v v8 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v8, v8, a0 -; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vmv.v.i v16, 0 ; RV32-BITS-UNKNOWN-NEXT: vmerge.vim v16, v16, 1, v0 -; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v28, v16, v8 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v24, v20, v8 -; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v24, 1 ; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 ; RV32-BITS-UNKNOWN-NEXT: ret ; ; RV32-BITS-256-LABEL: reverse_nxv64i1: ; RV32-BITS-256: # %bb.0: -; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; RV32-BITS-256-NEXT: vmv.v.i v8, 0 ; RV32-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 ; RV32-BITS-256-NEXT: csrr a0, vlenb @@ -534,16 +534,16 @@ ; RV32-BITS-512-NEXT: csrr a0, vlenb ; RV32-BITS-512-NEXT: slli a0, a0, 2 ; RV32-BITS-512-NEXT: addi a0, a0, -1 -; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; RV32-BITS-512-NEXT: vid.v v8 ; RV32-BITS-512-NEXT: vrsub.vx v8, v8, a0 -; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; RV32-BITS-512-NEXT: vmv.v.i v16, 0 ; RV32-BITS-512-NEXT: vmerge.vim v16, v16, 1, v0 -; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; RV32-BITS-512-NEXT: vrgather.vv v28, v16, v8 ; RV32-BITS-512-NEXT: vrgather.vv v24, v20, v8 -; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; RV32-BITS-512-NEXT: vand.vi v8, v24, 1 ; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0 ; RV32-BITS-512-NEXT: ret @@ -553,23 +553,23 @@ ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: slli a0, a0, 2 ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 -; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v8 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v8, v8, a0 -; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vmv.v.i v16, 0 ; RV64-BITS-UNKNOWN-NEXT: vmerge.vim v16, v16, 1, v0 -; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v28, v16, v8 ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v24, v20, v8 -; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vand.vi v8, v24, 1 ; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0 ; RV64-BITS-UNKNOWN-NEXT: ret ; ; RV64-BITS-256-LABEL: reverse_nxv64i1: ; RV64-BITS-256: # %bb.0: -; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; RV64-BITS-256-NEXT: vmv.v.i 
v8, 0 ; RV64-BITS-256-NEXT: vmerge.vim v8, v8, 1, v0 ; RV64-BITS-256-NEXT: csrr a0, vlenb @@ -587,16 +587,16 @@ ; RV64-BITS-512-NEXT: csrr a0, vlenb ; RV64-BITS-512-NEXT: slli a0, a0, 2 ; RV64-BITS-512-NEXT: addi a0, a0, -1 -; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; RV64-BITS-512-NEXT: vid.v v8 ; RV64-BITS-512-NEXT: vrsub.vx v8, v8, a0 -; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; RV64-BITS-512-NEXT: vmv.v.i v16, 0 ; RV64-BITS-512-NEXT: vmerge.vim v16, v16, 1, v0 -; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; RV64-BITS-512-NEXT: vrgather.vv v28, v16, v8 ; RV64-BITS-512-NEXT: vrgather.vv v24, v20, v8 -; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; RV64-BITS-512-NEXT: vand.vi v8, v24, 1 ; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0 ; RV64-BITS-512-NEXT: ret @@ -614,10 +614,10 @@ ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV32-BITS-UNKNOWN-NEXT: srli a0, a0, 3 ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 -; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vid.v v9 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v10, v9, a0 -; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 ; RV32-BITS-UNKNOWN-NEXT: vmv1r.v v8, v9 ; RV32-BITS-UNKNOWN-NEXT: ret @@ -627,7 +627,7 @@ ; RV32-BITS-256-NEXT: csrr a0, vlenb ; RV32-BITS-256-NEXT: srli a0, a0, 3 ; RV32-BITS-256-NEXT: addi a0, a0, -1 -; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; RV32-BITS-256-NEXT: vid.v v9 ; RV32-BITS-256-NEXT: vrsub.vx v10, v9, a0 ; RV32-BITS-256-NEXT: vrgather.vv v9, v8, v10 @@ -639,7 +639,7 @@ ; RV32-BITS-512-NEXT: csrr a0, vlenb ; RV32-BITS-512-NEXT: srli a0, a0, 3 ; RV32-BITS-512-NEXT: addi a0, a0, -1 -; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; RV32-BITS-512-NEXT: vid.v v9 ; RV32-BITS-512-NEXT: vrsub.vx v10, v9, a0 ; RV32-BITS-512-NEXT: vrgather.vv v9, v8, v10 @@ -651,10 +651,10 @@ ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: srli a0, a0, 3 ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 -; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v9 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v10, v9, a0 -; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 ; RV64-BITS-UNKNOWN-NEXT: vmv1r.v v8, v9 ; RV64-BITS-UNKNOWN-NEXT: ret @@ -664,7 +664,7 @@ ; RV64-BITS-256-NEXT: csrr a0, vlenb ; RV64-BITS-256-NEXT: srli a0, a0, 3 ; RV64-BITS-256-NEXT: addi a0, a0, -1 -; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; RV64-BITS-256-NEXT: vid.v v9 ; RV64-BITS-256-NEXT: vrsub.vx v10, v9, a0 ; RV64-BITS-256-NEXT: vrgather.vv v9, v8, v10 @@ -676,7 +676,7 @@ ; RV64-BITS-512-NEXT: csrr a0, vlenb ; RV64-BITS-512-NEXT: srli a0, a0, 3 ; RV64-BITS-512-NEXT: addi a0, a0, -1 -; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; RV64-BITS-512-NEXT: 
vsetvli a1, zero, e8, mf8, ta, ma ; RV64-BITS-512-NEXT: vid.v v9 ; RV64-BITS-512-NEXT: vrsub.vx v10, v9, a0 ; RV64-BITS-512-NEXT: vrgather.vv v9, v8, v10 @@ -692,10 +692,10 @@ ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV32-BITS-UNKNOWN-NEXT: srli a0, a0, 2 ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 -; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vid.v v9 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v10, v9, a0 -; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 ; RV32-BITS-UNKNOWN-NEXT: vmv1r.v v8, v9 ; RV32-BITS-UNKNOWN-NEXT: ret @@ -705,7 +705,7 @@ ; RV32-BITS-256-NEXT: csrr a0, vlenb ; RV32-BITS-256-NEXT: srli a0, a0, 2 ; RV32-BITS-256-NEXT: addi a0, a0, -1 -; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; RV32-BITS-256-NEXT: vid.v v9 ; RV32-BITS-256-NEXT: vrsub.vx v10, v9, a0 ; RV32-BITS-256-NEXT: vrgather.vv v9, v8, v10 @@ -717,7 +717,7 @@ ; RV32-BITS-512-NEXT: csrr a0, vlenb ; RV32-BITS-512-NEXT: srli a0, a0, 2 ; RV32-BITS-512-NEXT: addi a0, a0, -1 -; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; RV32-BITS-512-NEXT: vid.v v9 ; RV32-BITS-512-NEXT: vrsub.vx v10, v9, a0 ; RV32-BITS-512-NEXT: vrgather.vv v9, v8, v10 @@ -729,10 +729,10 @@ ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: srli a0, a0, 2 ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 -; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v9 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v10, v9, a0 -; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 ; RV64-BITS-UNKNOWN-NEXT: vmv1r.v v8, v9 ; RV64-BITS-UNKNOWN-NEXT: ret @@ -742,7 +742,7 @@ ; RV64-BITS-256-NEXT: csrr a0, vlenb ; RV64-BITS-256-NEXT: srli a0, a0, 2 ; RV64-BITS-256-NEXT: addi a0, a0, -1 -; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; RV64-BITS-256-NEXT: vid.v v9 ; RV64-BITS-256-NEXT: vrsub.vx v10, v9, a0 ; RV64-BITS-256-NEXT: vrgather.vv v9, v8, v10 @@ -754,7 +754,7 @@ ; RV64-BITS-512-NEXT: csrr a0, vlenb ; RV64-BITS-512-NEXT: srli a0, a0, 2 ; RV64-BITS-512-NEXT: addi a0, a0, -1 -; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; RV64-BITS-512-NEXT: vid.v v9 ; RV64-BITS-512-NEXT: vrsub.vx v10, v9, a0 ; RV64-BITS-512-NEXT: vrgather.vv v9, v8, v10 @@ -770,10 +770,10 @@ ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV32-BITS-UNKNOWN-NEXT: srli a0, a0, 1 ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 -; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vid.v v9 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v10, v9, a0 -; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 ; RV32-BITS-UNKNOWN-NEXT: vmv1r.v v8, v9 ; RV32-BITS-UNKNOWN-NEXT: ret @@ -783,7 +783,7 @@ ; RV32-BITS-256-NEXT: csrr a0, vlenb ; RV32-BITS-256-NEXT: srli a0, a0, 1 ; RV32-BITS-256-NEXT: addi a0, a0, 
-1 -; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; RV32-BITS-256-NEXT: vid.v v9 ; RV32-BITS-256-NEXT: vrsub.vx v10, v9, a0 ; RV32-BITS-256-NEXT: vrgather.vv v9, v8, v10 @@ -795,7 +795,7 @@ ; RV32-BITS-512-NEXT: csrr a0, vlenb ; RV32-BITS-512-NEXT: srli a0, a0, 1 ; RV32-BITS-512-NEXT: addi a0, a0, -1 -; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; RV32-BITS-512-NEXT: vid.v v9 ; RV32-BITS-512-NEXT: vrsub.vx v10, v9, a0 ; RV32-BITS-512-NEXT: vrgather.vv v9, v8, v10 @@ -807,10 +807,10 @@ ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: srli a0, a0, 1 ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 -; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v9 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v10, v9, a0 -; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 ; RV64-BITS-UNKNOWN-NEXT: vmv1r.v v8, v9 ; RV64-BITS-UNKNOWN-NEXT: ret @@ -820,7 +820,7 @@ ; RV64-BITS-256-NEXT: csrr a0, vlenb ; RV64-BITS-256-NEXT: srli a0, a0, 1 ; RV64-BITS-256-NEXT: addi a0, a0, -1 -; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; RV64-BITS-256-NEXT: vid.v v9 ; RV64-BITS-256-NEXT: vrsub.vx v10, v9, a0 ; RV64-BITS-256-NEXT: vrgather.vv v9, v8, v10 @@ -832,7 +832,7 @@ ; RV64-BITS-512-NEXT: csrr a0, vlenb ; RV64-BITS-512-NEXT: srli a0, a0, 1 ; RV64-BITS-512-NEXT: addi a0, a0, -1 -; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; RV64-BITS-512-NEXT: vid.v v9 ; RV64-BITS-512-NEXT: vrsub.vx v10, v9, a0 ; RV64-BITS-512-NEXT: vrgather.vv v9, v8, v10 @@ -847,10 +847,10 @@ ; RV32-BITS-UNKNOWN: # %bb.0: ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 -; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vid.v v10 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v10, v10, a0 -; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m1, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m1, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 ; RV32-BITS-UNKNOWN-NEXT: vmv.v.v v8, v9 ; RV32-BITS-UNKNOWN-NEXT: ret @@ -859,7 +859,7 @@ ; RV32-BITS-256: # %bb.0: ; RV32-BITS-256-NEXT: csrr a0, vlenb ; RV32-BITS-256-NEXT: addi a0, a0, -1 -; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV32-BITS-256-NEXT: vid.v v9 ; RV32-BITS-256-NEXT: vrsub.vx v10, v9, a0 ; RV32-BITS-256-NEXT: vrgather.vv v9, v8, v10 @@ -870,7 +870,7 @@ ; RV32-BITS-512: # %bb.0: ; RV32-BITS-512-NEXT: csrr a0, vlenb ; RV32-BITS-512-NEXT: addi a0, a0, -1 -; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV32-BITS-512-NEXT: vid.v v9 ; RV32-BITS-512-NEXT: vrsub.vx v10, v9, a0 ; RV32-BITS-512-NEXT: vrgather.vv v9, v8, v10 @@ -881,10 +881,10 @@ ; RV64-BITS-UNKNOWN: # %bb.0: ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 -; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v10 ; RV64-BITS-UNKNOWN-NEXT: 
vrsub.vx v10, v10, a0 -; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m1, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m1, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 ; RV64-BITS-UNKNOWN-NEXT: vmv.v.v v8, v9 ; RV64-BITS-UNKNOWN-NEXT: ret @@ -893,7 +893,7 @@ ; RV64-BITS-256: # %bb.0: ; RV64-BITS-256-NEXT: csrr a0, vlenb ; RV64-BITS-256-NEXT: addi a0, a0, -1 -; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV64-BITS-256-NEXT: vid.v v9 ; RV64-BITS-256-NEXT: vrsub.vx v10, v9, a0 ; RV64-BITS-256-NEXT: vrgather.vv v9, v8, v10 @@ -904,7 +904,7 @@ ; RV64-BITS-512: # %bb.0: ; RV64-BITS-512-NEXT: csrr a0, vlenb ; RV64-BITS-512-NEXT: addi a0, a0, -1 -; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV64-BITS-512-NEXT: vid.v v9 ; RV64-BITS-512-NEXT: vrsub.vx v10, v9, a0 ; RV64-BITS-512-NEXT: vrgather.vv v9, v8, v10 @@ -920,10 +920,10 @@ ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV32-BITS-UNKNOWN-NEXT: slli a0, a0, 1 ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 -; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vid.v v12 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v12, v12, a0 -; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m2, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m2, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v8, v12 ; RV32-BITS-UNKNOWN-NEXT: vmv.v.v v8, v10 ; RV32-BITS-UNKNOWN-NEXT: ret @@ -933,7 +933,7 @@ ; RV32-BITS-256-NEXT: csrr a0, vlenb ; RV32-BITS-256-NEXT: slli a0, a0, 1 ; RV32-BITS-256-NEXT: addi a0, a0, -1 -; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; RV32-BITS-256-NEXT: vid.v v10 ; RV32-BITS-256-NEXT: vrsub.vx v12, v10, a0 ; RV32-BITS-256-NEXT: vrgather.vv v10, v8, v12 @@ -945,7 +945,7 @@ ; RV32-BITS-512-NEXT: csrr a0, vlenb ; RV32-BITS-512-NEXT: slli a0, a0, 1 ; RV32-BITS-512-NEXT: addi a0, a0, -1 -; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; RV32-BITS-512-NEXT: vid.v v10 ; RV32-BITS-512-NEXT: vrsub.vx v12, v10, a0 ; RV32-BITS-512-NEXT: vrgather.vv v10, v8, v12 @@ -957,10 +957,10 @@ ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: slli a0, a0, 1 ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 -; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v12 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v12, v12, a0 -; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m2, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m2, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v8, v12 ; RV64-BITS-UNKNOWN-NEXT: vmv.v.v v8, v10 ; RV64-BITS-UNKNOWN-NEXT: ret @@ -970,7 +970,7 @@ ; RV64-BITS-256-NEXT: csrr a0, vlenb ; RV64-BITS-256-NEXT: slli a0, a0, 1 ; RV64-BITS-256-NEXT: addi a0, a0, -1 -; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; RV64-BITS-256-NEXT: vid.v v10 ; RV64-BITS-256-NEXT: vrsub.vx v12, v10, a0 ; RV64-BITS-256-NEXT: vrgather.vv v10, v8, v12 @@ -982,7 +982,7 @@ ; RV64-BITS-512-NEXT: csrr a0, vlenb ; RV64-BITS-512-NEXT: slli a0, a0, 1 ; RV64-BITS-512-NEXT: addi a0, a0, -1 -; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; RV64-BITS-512-NEXT: 
vid.v v10 ; RV64-BITS-512-NEXT: vrsub.vx v12, v10, a0 ; RV64-BITS-512-NEXT: vrgather.vv v10, v8, v12 @@ -998,10 +998,10 @@ ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV32-BITS-UNKNOWN-NEXT: slli a0, a0, 2 ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 -; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vid.v v16 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v16, v16, a0 -; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m4, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m4, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v8, v16 ; RV32-BITS-UNKNOWN-NEXT: vmv.v.v v8, v12 ; RV32-BITS-UNKNOWN-NEXT: ret @@ -1011,7 +1011,7 @@ ; RV32-BITS-256-NEXT: csrr a0, vlenb ; RV32-BITS-256-NEXT: slli a0, a0, 2 ; RV32-BITS-256-NEXT: addi a0, a0, -1 -; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; RV32-BITS-256-NEXT: vid.v v12 ; RV32-BITS-256-NEXT: vrsub.vx v16, v12, a0 ; RV32-BITS-256-NEXT: vrgather.vv v12, v8, v16 @@ -1023,7 +1023,7 @@ ; RV32-BITS-512-NEXT: csrr a0, vlenb ; RV32-BITS-512-NEXT: slli a0, a0, 2 ; RV32-BITS-512-NEXT: addi a0, a0, -1 -; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; RV32-BITS-512-NEXT: vid.v v12 ; RV32-BITS-512-NEXT: vrsub.vx v16, v12, a0 ; RV32-BITS-512-NEXT: vrgather.vv v12, v8, v16 @@ -1035,10 +1035,10 @@ ; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV64-BITS-UNKNOWN-NEXT: slli a0, a0, 2 ; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1 -; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vid.v v16 ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v16, v16, a0 -; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m4, ta, mu +; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m4, ta, ma ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v8, v16 ; RV64-BITS-UNKNOWN-NEXT: vmv.v.v v8, v12 ; RV64-BITS-UNKNOWN-NEXT: ret @@ -1048,7 +1048,7 @@ ; RV64-BITS-256-NEXT: csrr a0, vlenb ; RV64-BITS-256-NEXT: slli a0, a0, 2 ; RV64-BITS-256-NEXT: addi a0, a0, -1 -; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; RV64-BITS-256-NEXT: vid.v v12 ; RV64-BITS-256-NEXT: vrsub.vx v16, v12, a0 ; RV64-BITS-256-NEXT: vrgather.vv v12, v8, v16 @@ -1060,7 +1060,7 @@ ; RV64-BITS-512-NEXT: csrr a0, vlenb ; RV64-BITS-512-NEXT: slli a0, a0, 2 ; RV64-BITS-512-NEXT: addi a0, a0, -1 -; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; RV64-BITS-512-NEXT: vid.v v12 ; RV64-BITS-512-NEXT: vrsub.vx v16, v12, a0 ; RV64-BITS-512-NEXT: vrgather.vv v12, v8, v16 @@ -1076,10 +1076,10 @@ ; RV32-BITS-UNKNOWN-NEXT: csrr a0, vlenb ; RV32-BITS-UNKNOWN-NEXT: slli a0, a0, 2 ; RV32-BITS-UNKNOWN-NEXT: addi a0, a0, -1 -; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vid.v v16 ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v24, v16, a0 -; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m4, ta, mu +; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m4, ta, ma ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v20, v8, v24 ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v16, v12, v24 ; RV32-BITS-UNKNOWN-NEXT: vmv8r.v v8, v16 @@ -1090,7 +1090,7 @@ ; RV32-BITS-256-NEXT: csrr a0, vlenb ; RV32-BITS-256-NEXT: slli a0, a0, 3 ; RV32-BITS-256-NEXT: addi a0, a0, -1 -; 
RV32-BITS-256-NEXT: vsetvli a1, zero, e8, m8, ta, mu
+; RV32-BITS-256-NEXT: vsetvli a1, zero, e8, m8, ta, ma
; RV32-BITS-256-NEXT: vid.v v16
; RV32-BITS-256-NEXT: vrsub.vx v24, v16, a0
; RV32-BITS-256-NEXT: vrgather.vv v16, v8, v24
@@ -1102,7 +1102,7 @@
; RV32-BITS-512-NEXT: csrr a0, vlenb
; RV32-BITS-512-NEXT: slli a0, a0, 2
; RV32-BITS-512-NEXT: addi a0, a0, -1
-; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m4, ta, mu
+; RV32-BITS-512-NEXT: vsetvli a1, zero, e8, m4, ta, ma
; RV32-BITS-512-NEXT: vid.v v16
; RV32-BITS-512-NEXT: vrsub.vx v24, v16, a0
; RV32-BITS-512-NEXT: vrgather.vv v20, v8, v24
@@ -1115,10 +1115,10 @@
; RV64-BITS-UNKNOWN-NEXT: csrr a0, vlenb
; RV64-BITS-UNKNOWN-NEXT: slli a0, a0, 2
; RV64-BITS-UNKNOWN-NEXT: addi a0, a0, -1
-; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m8, ta, mu
+; RV64-BITS-UNKNOWN-NEXT: vsetvli a1, zero, e16, m8, ta, ma
; RV64-BITS-UNKNOWN-NEXT: vid.v v16
; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v24, v16, a0
-; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m4, ta, mu
+; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m4, ta, ma
; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v20, v8, v24
; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v16, v12, v24
; RV64-BITS-UNKNOWN-NEXT: vmv8r.v v8, v16
@@ -1129,7 +1129,7 @@
; RV64-BITS-256-NEXT: csrr a0, vlenb
; RV64-BITS-256-NEXT: slli a0, a0, 3
; RV64-BITS-256-NEXT: addi a0, a0, -1
-; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, m8, ta, mu
+; RV64-BITS-256-NEXT: vsetvli a1, zero, e8, m8, ta, ma
; RV64-BITS-256-NEXT: vid.v v16
; RV64-BITS-256-NEXT: vrsub.vx v24, v16, a0
; RV64-BITS-256-NEXT: vrgather.vv v16, v8, v24
@@ -1141,7 +1141,7 @@
; RV64-BITS-512-NEXT: csrr a0, vlenb
; RV64-BITS-512-NEXT: slli a0, a0, 2
; RV64-BITS-512-NEXT: addi a0, a0, -1
-; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m4, ta, mu
+; RV64-BITS-512-NEXT: vsetvli a1, zero, e8, m4, ta, ma
; RV64-BITS-512-NEXT: vid.v v16
; RV64-BITS-512-NEXT: vrsub.vx v24, v16, a0
; RV64-BITS-512-NEXT: vrgather.vv v20, v8, v24
@@ -1158,7 +1158,7 @@
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vid.v v9
; CHECK-NEXT: vrsub.vx v10, v9, a0
; CHECK-NEXT: vrgather.vv v9, v8, v10
@@ -1174,7 +1174,7 @@
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vid.v v9
; CHECK-NEXT: vrsub.vx v10, v9, a0
; CHECK-NEXT: vrgather.vv v9, v8, v10
@@ -1190,7 +1190,7 @@
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 1
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vid.v v9
; CHECK-NEXT: vrsub.vx v10, v9, a0
; CHECK-NEXT: vrgather.vv v9, v8, v10
@@ -1205,7 +1205,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vid.v v10
; CHECK-NEXT: vrsub.vx v12, v10, a0
; CHECK-NEXT: vrgather.vv v10, v8, v12
@@ -1221,7 +1221,7 @@
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 1
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT: vid.v v12
; CHECK-NEXT: vrsub.vx v16, v12, a0
; CHECK-NEXT: vrgather.vv v12, v8, v16
@@ -1237,7 +1237,7 @@
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma
; CHECK-NEXT: vid.v v16
; CHECK-NEXT: vrsub.vx v24, v16, a0
; CHECK-NEXT: vrgather.vv v16, v8, v24
@@ -1253,7 +1253,7 @@
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT: vid.v v9
; CHECK-NEXT: vrsub.vx v10, v9, a0
; CHECK-NEXT: vrgather.vv v9, v8, v10
@@ -1269,7 +1269,7 @@
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT: vid.v v9
; CHECK-NEXT: vrsub.vx v10, v9, a0
; CHECK-NEXT: vrgather.vv v9, v8, v10
@@ -1285,7 +1285,7 @@
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 1
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT: vid.v v10
; CHECK-NEXT: vrsub.vx v12, v10, a0
; CHECK-NEXT: vrgather.vv v10, v8, v12
@@ -1300,7 +1300,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT: vid.v v12
; CHECK-NEXT: vrsub.vx v16, v12, a0
; CHECK-NEXT: vrgather.vv v12, v8, v16
@@ -1316,7 +1316,7 @@
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 1
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma
; CHECK-NEXT: vid.v v16
; CHECK-NEXT: vrsub.vx v24, v16, a0
; CHECK-NEXT: vrgather.vv v16, v8, v24
@@ -1332,7 +1332,7 @@
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT: vid.v v9
; CHECK-NEXT: vrsub.vx v10, v9, a0
; CHECK-NEXT: vrgather.vv v9, v8, v10
@@ -1348,7 +1348,7 @@
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; CHECK-NEXT: vid.v v10
; CHECK-NEXT: vrsub.vx v12, v10, a0
; CHECK-NEXT: vrgather.vv v10, v8, v12
@@ -1364,7 +1364,7 @@
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 1
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; CHECK-NEXT: vid.v v12
; CHECK-NEXT: vrsub.vx v16, v12, a0
; CHECK-NEXT: vrgather.vv v12, v8, v16
@@ -1379,7 +1379,7 @@
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; CHECK-NEXT: vid.v v16
; CHECK-NEXT: vrsub.vx v24, v16, a0
; CHECK-NEXT: vrgather.vv v16, v8, v24
@@ -1399,7 +1399,7 @@
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vid.v v9
; CHECK-NEXT: vrsub.vx v10, v9, a0
; CHECK-NEXT: vrgather.vv v9, v8, v10
@@ -1415,7 +1415,7 @@
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vid.v v9
; CHECK-NEXT: vrsub.vx v10, v9, a0 ;
CHECK-NEXT: vrgather.vv v9, v8, v10 @@ -1431,7 +1431,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: vrsub.vx v10, v9, a0 ; CHECK-NEXT: vrgather.vv v9, v8, v10 @@ -1446,7 +1446,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vid.v v10 ; CHECK-NEXT: vrsub.vx v12, v10, a0 ; CHECK-NEXT: vrgather.vv v10, v8, v12 @@ -1462,7 +1462,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vid.v v12 ; CHECK-NEXT: vrsub.vx v16, v12, a0 ; CHECK-NEXT: vrgather.vv v12, v8, v16 @@ -1478,7 +1478,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vid.v v16 ; CHECK-NEXT: vrsub.vx v24, v16, a0 ; CHECK-NEXT: vrgather.vv v16, v8, v24 @@ -1494,7 +1494,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: vrsub.vx v10, v9, a0 ; CHECK-NEXT: vrgather.vv v9, v8, v10 @@ -1510,7 +1510,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: vrsub.vx v10, v9, a0 ; CHECK-NEXT: vrgather.vv v9, v8, v10 @@ -1526,7 +1526,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vid.v v10 ; CHECK-NEXT: vrsub.vx v12, v10, a0 ; CHECK-NEXT: vrgather.vv v10, v8, v12 @@ -1541,7 +1541,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vid.v v12 ; CHECK-NEXT: vrsub.vx v16, v12, a0 ; CHECK-NEXT: vrgather.vv v12, v8, v16 @@ -1557,7 +1557,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vid.v v16 ; CHECK-NEXT: vrsub.vx v24, v16, a0 ; CHECK-NEXT: vrgather.vv v16, v8, v24 @@ -1573,7 +1573,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: vrsub.vx v10, v9, a0 ; CHECK-NEXT: vrgather.vv v9, v8, v10 @@ -1589,7 +1589,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; CHECK-NEXT: vid.v v10 ; CHECK-NEXT: vrsub.vx v12, v10, a0 ; CHECK-NEXT: vrgather.vv v10, v8, v12 @@ -1605,7 +1605,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: 
vsetvli a1, zero, e64, m4, ta, ma ; CHECK-NEXT: vid.v v12 ; CHECK-NEXT: vrsub.vx v16, v12, a0 ; CHECK-NEXT: vrgather.vv v12, v8, v16 @@ -1620,7 +1620,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; CHECK-NEXT: vid.v v16 ; CHECK-NEXT: vrsub.vx v24, v16, a0 ; CHECK-NEXT: vrgather.vv v16, v8, v24 @@ -1638,7 +1638,7 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; CHECK-NEXT: vid.v v12 ; CHECK-NEXT: vrsub.vx v12, v12, a0 ; CHECK-NEXT: vrgather.vv v16, v8, v12 @@ -1655,7 +1655,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; CHECK-NEXT: vid.v v16 ; CHECK-NEXT: vrsub.vx v16, v16, a0 ; CHECK-NEXT: vrgather.vv v24, v8, v16 @@ -1680,7 +1680,7 @@ ; CHECK-NEXT: andi sp, sp, -64 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: addi a1, a0, -1 -; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; CHECK-NEXT: vid.v v24 ; CHECK-NEXT: vrsub.vx v24, v24, a1 ; CHECK-NEXT: vrgather.vv v0, v16, v24 diff --git a/llvm/test/CodeGen/RISCV/rvv/pr52475.ll b/llvm/test/CodeGen/RISCV/rvv/pr52475.ll --- a/llvm/test/CodeGen/RISCV/rvv/pr52475.ll +++ b/llvm/test/CodeGen/RISCV/rvv/pr52475.ll @@ -7,7 +7,7 @@ define <128 x i32> @ret_split_v128i32(<128 x i32>* %x) { ; CHECK-LABEL: ret_split_v128i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: addi a2, a1, 448 diff --git a/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll b/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll --- a/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll @@ -23,7 +23,7 @@ ; CHECK-NEXT: andi sp, sp, -64 ; CHECK-NEXT: mv s1, sp ; CHECK-NEXT: mv s2, a0 -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: addi a0, s1, 160 ; CHECK-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill @@ -40,7 +40,7 @@ ; CHECK-NEXT: sd t0, 0(sp) ; CHECK-NEXT: call bar@plt ; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (s2) ; CHECK-NEXT: addi a0, s1, 160 ; CHECK-NEXT: vl2re8.v v10, (a0) # Unknown-size Folded Reload diff --git a/llvm/test/CodeGen/RISCV/rvv/regalloc-fast-crash.ll b/llvm/test/CodeGen/RISCV/rvv/regalloc-fast-crash.ll --- a/llvm/test/CodeGen/RISCV/rvv/regalloc-fast-crash.ll +++ b/llvm/test/CodeGen/RISCV/rvv/regalloc-fast-crash.ll @@ -11,7 +11,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll @@ -674,7 +674,7 @@ ; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a4, a1, 3 -; CHECK-NEXT: 
vsetvli a3, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma ; CHECK-NEXT: sub a3, a0, a1 ; CHECK-NEXT: vslidedown.vx v2, v0, a4 ; CHECK-NEXT: bltu a0, a3, .LBB32_2 diff --git a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll @@ -674,7 +674,7 @@ ; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a4, a1, 3 -; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma ; CHECK-NEXT: sub a3, a0, a1 ; CHECK-NEXT: vslidedown.vx v2, v0, a4 ; CHECK-NEXT: bltu a0, a3, .LBB32_2 diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll --- a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll @@ -19,7 +19,7 @@ ; SPILL-O0-NEXT: add a1, sp, a1 ; SPILL-O0-NEXT: addi a1, a1, 16 ; SPILL-O0-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill -; SPILL-O0-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; SPILL-O0-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; SPILL-O0-NEXT: vfadd.vv v8, v8, v9 ; SPILL-O0-NEXT: addi a0, sp, 16 ; SPILL-O0-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill @@ -34,7 +34,7 @@ ; SPILL-O0-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload ; SPILL-O0-NEXT: # kill: def $x11 killed $x10 ; SPILL-O0-NEXT: lw a0, 8(sp) # 4-byte Folded Reload -; SPILL-O0-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; SPILL-O0-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; SPILL-O0-NEXT: vfadd.vv v8, v8, v9 ; SPILL-O0-NEXT: csrr a0, vlenb ; SPILL-O0-NEXT: slli a0, a0, 1 @@ -54,7 +54,7 @@ ; SPILL-O2-NEXT: mv s0, a0 ; SPILL-O2-NEXT: addi a1, sp, 16 ; SPILL-O2-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill -; SPILL-O2-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; SPILL-O2-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; SPILL-O2-NEXT: vfadd.vv v9, v8, v9 ; SPILL-O2-NEXT: csrr a0, vlenb ; SPILL-O2-NEXT: add a0, sp, a0 @@ -63,7 +63,7 @@ ; SPILL-O2-NEXT: lui a0, %hi(.L.str) ; SPILL-O2-NEXT: addi a0, a0, %lo(.L.str) ; SPILL-O2-NEXT: call puts@plt -; SPILL-O2-NEXT: vsetvli zero, s0, e64, m1, ta, mu +; SPILL-O2-NEXT: vsetvli zero, s0, e64, m1, ta, ma ; SPILL-O2-NEXT: csrr a0, vlenb ; SPILL-O2-NEXT: add a0, sp, a0 ; SPILL-O2-NEXT: addi a0, a0, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll --- a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll @@ -11,7 +11,7 @@ ; SPILL-O0-NEXT: csrr a2, vlenb ; SPILL-O0-NEXT: slli a2, a2, 1 ; SPILL-O0-NEXT: sub sp, sp, a2 -; SPILL-O0-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; SPILL-O0-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; SPILL-O0-NEXT: vlseg2e32.v v8, (a0) ; SPILL-O0-NEXT: vmv1r.v v8, v9 ; SPILL-O0-NEXT: addi a0, sp, 16 @@ -32,7 +32,7 @@ ; SPILL-O2-NEXT: csrr a2, vlenb ; SPILL-O2-NEXT: slli a2, a2, 1 ; SPILL-O2-NEXT: sub sp, sp, a2 -; SPILL-O2-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; SPILL-O2-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; SPILL-O2-NEXT: vlseg2e32.v v8, (a0) ; SPILL-O2-NEXT: addi a0, sp, 16 ; SPILL-O2-NEXT: csrr a1, vlenb @@ -67,7 +67,7 @@ ; SPILL-O0-NEXT: csrr a2, vlenb ; SPILL-O0-NEXT: slli a2, a2, 1 ; SPILL-O0-NEXT: sub sp, sp, a2 -; SPILL-O0-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; SPILL-O0-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; SPILL-O0-NEXT: vlseg2e32.v v8, (a0) ; SPILL-O0-NEXT: vmv1r.v v8, v9 ; SPILL-O0-NEXT: addi a0, 
sp, 16 @@ -88,7 +88,7 @@ ; SPILL-O2-NEXT: csrr a2, vlenb ; SPILL-O2-NEXT: slli a2, a2, 1 ; SPILL-O2-NEXT: sub sp, sp, a2 -; SPILL-O2-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; SPILL-O2-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; SPILL-O2-NEXT: vlseg2e32.v v8, (a0) ; SPILL-O2-NEXT: addi a0, sp, 16 ; SPILL-O2-NEXT: csrr a1, vlenb @@ -123,7 +123,7 @@ ; SPILL-O0-NEXT: csrr a2, vlenb ; SPILL-O0-NEXT: slli a2, a2, 1 ; SPILL-O0-NEXT: sub sp, sp, a2 -; SPILL-O0-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; SPILL-O0-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; SPILL-O0-NEXT: vlseg2e32.v v8, (a0) ; SPILL-O0-NEXT: vmv2r.v v8, v10 ; SPILL-O0-NEXT: addi a0, sp, 16 @@ -144,7 +144,7 @@ ; SPILL-O2-NEXT: csrr a2, vlenb ; SPILL-O2-NEXT: slli a2, a2, 2 ; SPILL-O2-NEXT: sub sp, sp, a2 -; SPILL-O2-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; SPILL-O2-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; SPILL-O2-NEXT: vlseg2e32.v v8, (a0) ; SPILL-O2-NEXT: addi a0, sp, 16 ; SPILL-O2-NEXT: csrr a1, vlenb @@ -181,7 +181,7 @@ ; SPILL-O0-NEXT: csrr a2, vlenb ; SPILL-O0-NEXT: slli a2, a2, 2 ; SPILL-O0-NEXT: sub sp, sp, a2 -; SPILL-O0-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; SPILL-O0-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; SPILL-O0-NEXT: vlseg2e32.v v8, (a0) ; SPILL-O0-NEXT: vmv4r.v v8, v12 ; SPILL-O0-NEXT: addi a0, sp, 16 @@ -202,7 +202,7 @@ ; SPILL-O2-NEXT: csrr a2, vlenb ; SPILL-O2-NEXT: slli a2, a2, 3 ; SPILL-O2-NEXT: sub sp, sp, a2 -; SPILL-O2-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; SPILL-O2-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; SPILL-O2-NEXT: vlseg2e32.v v8, (a0) ; SPILL-O2-NEXT: addi a0, sp, 16 ; SPILL-O2-NEXT: csrr a1, vlenb @@ -239,7 +239,7 @@ ; SPILL-O0-NEXT: csrr a2, vlenb ; SPILL-O0-NEXT: slli a2, a2, 1 ; SPILL-O0-NEXT: sub sp, sp, a2 -; SPILL-O0-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; SPILL-O0-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; SPILL-O0-NEXT: vlseg3e32.v v8, (a0) ; SPILL-O0-NEXT: vmv2r.v v8, v10 ; SPILL-O0-NEXT: addi a0, sp, 16 @@ -261,7 +261,7 @@ ; SPILL-O2-NEXT: li a3, 6 ; SPILL-O2-NEXT: mul a2, a2, a3 ; SPILL-O2-NEXT: sub sp, sp, a2 -; SPILL-O2-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; SPILL-O2-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; SPILL-O2-NEXT: vlseg3e32.v v8, (a0) ; SPILL-O2-NEXT: addi a0, sp, 16 ; SPILL-O2-NEXT: csrr a1, vlenb diff --git a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll --- a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll @@ -19,7 +19,7 @@ ; SPILL-O0-NEXT: add a1, sp, a1 ; SPILL-O0-NEXT: addi a1, a1, 32 ; SPILL-O0-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill -; SPILL-O0-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; SPILL-O0-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; SPILL-O0-NEXT: vfadd.vv v8, v8, v9 ; SPILL-O0-NEXT: addi a0, sp, 32 ; SPILL-O0-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill @@ -34,7 +34,7 @@ ; SPILL-O0-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload ; SPILL-O0-NEXT: # kill: def $x11 killed $x10 ; SPILL-O0-NEXT: ld a0, 16(sp) # 8-byte Folded Reload -; SPILL-O0-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; SPILL-O0-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; SPILL-O0-NEXT: vfadd.vv v8, v8, v9 ; SPILL-O0-NEXT: csrr a0, vlenb ; SPILL-O0-NEXT: slli a0, a0, 1 @@ -54,7 +54,7 @@ ; SPILL-O2-NEXT: mv s0, a0 ; SPILL-O2-NEXT: addi a1, sp, 16 ; SPILL-O2-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill -; SPILL-O2-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; SPILL-O2-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; SPILL-O2-NEXT: vfadd.vv v9, v8, v9 ; 
SPILL-O2-NEXT: csrr a0, vlenb ; SPILL-O2-NEXT: add a0, sp, a0 @@ -63,7 +63,7 @@ ; SPILL-O2-NEXT: lui a0, %hi(.L.str) ; SPILL-O2-NEXT: addi a0, a0, %lo(.L.str) ; SPILL-O2-NEXT: call puts@plt -; SPILL-O2-NEXT: vsetvli zero, s0, e64, m1, ta, mu +; SPILL-O2-NEXT: vsetvli zero, s0, e64, m1, ta, ma ; SPILL-O2-NEXT: csrr a0, vlenb ; SPILL-O2-NEXT: add a0, sp, a0 ; SPILL-O2-NEXT: addi a0, a0, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll --- a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll @@ -11,7 +11,7 @@ ; SPILL-O0-NEXT: csrr a2, vlenb ; SPILL-O0-NEXT: slli a2, a2, 1 ; SPILL-O0-NEXT: sub sp, sp, a2 -; SPILL-O0-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; SPILL-O0-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; SPILL-O0-NEXT: vlseg2e32.v v8, (a0) ; SPILL-O0-NEXT: vmv1r.v v8, v9 ; SPILL-O0-NEXT: addi a0, sp, 16 @@ -32,7 +32,7 @@ ; SPILL-O2-NEXT: csrr a2, vlenb ; SPILL-O2-NEXT: slli a2, a2, 1 ; SPILL-O2-NEXT: sub sp, sp, a2 -; SPILL-O2-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; SPILL-O2-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; SPILL-O2-NEXT: vlseg2e32.v v8, (a0) ; SPILL-O2-NEXT: addi a0, sp, 16 ; SPILL-O2-NEXT: csrr a1, vlenb @@ -67,7 +67,7 @@ ; SPILL-O0-NEXT: csrr a2, vlenb ; SPILL-O0-NEXT: slli a2, a2, 1 ; SPILL-O0-NEXT: sub sp, sp, a2 -; SPILL-O0-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; SPILL-O0-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; SPILL-O0-NEXT: vlseg2e32.v v8, (a0) ; SPILL-O0-NEXT: vmv1r.v v8, v9 ; SPILL-O0-NEXT: addi a0, sp, 16 @@ -88,7 +88,7 @@ ; SPILL-O2-NEXT: csrr a2, vlenb ; SPILL-O2-NEXT: slli a2, a2, 1 ; SPILL-O2-NEXT: sub sp, sp, a2 -; SPILL-O2-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; SPILL-O2-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; SPILL-O2-NEXT: vlseg2e32.v v8, (a0) ; SPILL-O2-NEXT: addi a0, sp, 16 ; SPILL-O2-NEXT: csrr a1, vlenb @@ -123,7 +123,7 @@ ; SPILL-O0-NEXT: csrr a2, vlenb ; SPILL-O0-NEXT: slli a2, a2, 1 ; SPILL-O0-NEXT: sub sp, sp, a2 -; SPILL-O0-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; SPILL-O0-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; SPILL-O0-NEXT: vlseg2e32.v v8, (a0) ; SPILL-O0-NEXT: vmv2r.v v8, v10 ; SPILL-O0-NEXT: addi a0, sp, 16 @@ -144,7 +144,7 @@ ; SPILL-O2-NEXT: csrr a2, vlenb ; SPILL-O2-NEXT: slli a2, a2, 2 ; SPILL-O2-NEXT: sub sp, sp, a2 -; SPILL-O2-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; SPILL-O2-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; SPILL-O2-NEXT: vlseg2e32.v v8, (a0) ; SPILL-O2-NEXT: addi a0, sp, 16 ; SPILL-O2-NEXT: csrr a1, vlenb @@ -181,7 +181,7 @@ ; SPILL-O0-NEXT: csrr a2, vlenb ; SPILL-O0-NEXT: slli a2, a2, 2 ; SPILL-O0-NEXT: sub sp, sp, a2 -; SPILL-O0-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; SPILL-O0-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; SPILL-O0-NEXT: vlseg2e32.v v8, (a0) ; SPILL-O0-NEXT: vmv4r.v v8, v12 ; SPILL-O0-NEXT: addi a0, sp, 16 @@ -202,7 +202,7 @@ ; SPILL-O2-NEXT: csrr a2, vlenb ; SPILL-O2-NEXT: slli a2, a2, 3 ; SPILL-O2-NEXT: sub sp, sp, a2 -; SPILL-O2-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; SPILL-O2-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; SPILL-O2-NEXT: vlseg2e32.v v8, (a0) ; SPILL-O2-NEXT: addi a0, sp, 16 ; SPILL-O2-NEXT: csrr a1, vlenb @@ -239,7 +239,7 @@ ; SPILL-O0-NEXT: csrr a2, vlenb ; SPILL-O0-NEXT: slli a2, a2, 1 ; SPILL-O0-NEXT: sub sp, sp, a2 -; SPILL-O0-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; SPILL-O0-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; SPILL-O0-NEXT: vlseg3e32.v v8, (a0) ; SPILL-O0-NEXT: vmv2r.v v8, v10 ; SPILL-O0-NEXT: addi a0, sp, 16 @@ -261,7 +261,7 @@ ; 
SPILL-O2-NEXT: li a3, 6 ; SPILL-O2-NEXT: mul a2, a2, a3 ; SPILL-O2-NEXT: sub sp, sp, a2 -; SPILL-O2-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; SPILL-O2-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; SPILL-O2-NEXT: vlseg3e32.v v8, (a0) ; SPILL-O2-NEXT: addi a0, sp, 16 ; SPILL-O2-NEXT: csrr a1, vlenb diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll --- a/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll @@ -8,7 +8,7 @@ ; CHECK-NEXT: ld a1, 8(sp) ; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vl8re32.v v0, (a1) -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v24 ; CHECK-NEXT: vadd.vv v16, v16, v0 ; CHECK-NEXT: vadd.vv v8, v8, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll --- a/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll @@ -78,7 +78,7 @@ ; CHECK-NEXT: sd a0, -64(s0) ; CHECK-NEXT: ld a0, -64(s0) ; CHECK-NEXT: addi a1, s0, -56 -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: csrr s1, vlenb ; CHECK-NEXT: slli s1, s1, 3 diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll --- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll @@ -26,7 +26,7 @@ define @vpmerge_vpadd2( %passthru, %x, %y, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpadd2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmseq.vv v0, v9, v10 ; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t @@ -43,7 +43,7 @@ define @vpmerge_vpadd3( %passthru, %x, %y, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpadd3: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vadd.vv v8, v9, v10 ; CHECK-NEXT: ret %splat = insertelement poison, i1 -1, i32 0 @@ -73,7 +73,7 @@ define @vpmerge_vrgatherei16( %passthru, %x, %y, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vrgatherei16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vrgatherei16.vv v8, v9, v10 ; CHECK-NEXT: ret %1 = zext i32 %vl to i64 @@ -191,7 +191,7 @@ define @vpmerge_vpload2( %passthru, * %p, %x, %y, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpload2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmseq.vv v0, v9, v10 ; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vle32.v v8, (a0), v0.t @@ -225,9 +225,9 @@ define @vpmerge_vleff( %passthru, * %p, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vleff: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vle32ff.v v9, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret %1 = zext i32 %vl to i64 @@ -472,7 +472,7 @@ define @vpselect_vpadd3( %passthru, %x, %y, i32 zeroext %vl) { ; CHECK-LABEL: vpselect_vpadd3: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli 
zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vadd.vv v8, v9, v10 ; CHECK-NEXT: ret %splat = insertelement poison, i1 -1, i32 0 @@ -500,7 +500,7 @@ define @vpselect_vrgatherei16( %passthru, %x, %y, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpselect_vrgatherei16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vrgatherei16.vv v8, v9, v10 ; CHECK-NEXT: ret %1 = zext i32 %vl to i64 @@ -643,9 +643,9 @@ define @vpselect_vleff( %passthru, * %p, %m, i32 zeroext %vl) { ; CHECK-LABEL: vpselect_vleff: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vle32ff.v v9, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret %1 = zext i32 %vl to i64 @@ -849,21 +849,21 @@ define void @test_dag_loop() { ; CHECK-LABEL: test_dag_loop: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (zero) -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vmclr.m v0 ; CHECK-NEXT: vmv.v.i v16, 0 ; CHECK-NEXT: vsetivli zero, 0, e8, m4, tu, mu ; CHECK-NEXT: vmv4r.v v20, v16 ; CHECK-NEXT: vssubu.vx v20, v16, zero, v0.t -; CHECK-NEXT: vsetvli zero, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, m4, ta, ma ; CHECK-NEXT: vmseq.vv v0, v20, v16 -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vmv.v.i v16, 0 -; CHECK-NEXT: vsetivli zero, 1, e16, m8, tu, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m8, tu, ma ; CHECK-NEXT: vmerge.vvm v16, v16, v8, v0 -; CHECK-NEXT: vsetivli zero, 0, e16, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m8, ta, ma ; CHECK-NEXT: vse16.v v16, (zero) ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-vmerge-to-vadd.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-vmerge-to-vadd.ll --- a/llvm/test/CodeGen/RISCV/rvv/rvv-vmerge-to-vadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rvv-vmerge-to-vadd.ll @@ -3,7 +3,7 @@ define @vpmerge_mf8( %x, %y, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_mf8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vadd.vi v8, v9, 0 ; CHECK-NEXT: ret %splat = insertelement poison, i1 -1, i8 0 @@ -15,7 +15,7 @@ define @vpmerge_mf4( %x, %y, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_mf4: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vadd.vi v8, v9, 0 ; CHECK-NEXT: ret %splat = insertelement poison, i1 -1, i8 0 @@ -27,7 +27,7 @@ define @vpmerge_mf2( %x, %y, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_mf2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vadd.vi v8, v9, 0 ; CHECK-NEXT: ret %splat = insertelement poison, i1 -1, i8 0 @@ -39,7 +39,7 @@ define @vpmerge_m1( %x, %y, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_m1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vadd.vi v8, v9, 0 ; CHECK-NEXT: ret %splat = insertelement poison, i1 -1, i8 0 @@ -51,7 +51,7 @@ define @vpmerge_m2( %x, %y, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_m2: ; 
CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vadd.vi v8, v10, 0 ; CHECK-NEXT: ret %splat = insertelement poison, i1 -1, i16 0 @@ -63,7 +63,7 @@ define @vpmerge_m4( %x, %y, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_m4: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vadd.vi v8, v12, 0 ; CHECK-NEXT: ret %splat = insertelement poison, i1 -1, i32 0 @@ -75,7 +75,7 @@ define @vpmerge_m8( %x, %y, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_m8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vadd.vi v8, v16, 0 ; CHECK-NEXT: ret %splat = insertelement poison, i1 -1, i64 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/saddo-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/saddo-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/saddo-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/saddo-sdnode.ll @@ -6,7 +6,7 @@ define @saddo_nvx2i32( %x, %y) { ; CHECK-LABEL: saddo_nvx2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vsadd.vv v10, v8, v9 ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: vmsne.vv v0, v8, v10 diff --git a/llvm/test/CodeGen/RISCV/rvv/select-fp.ll b/llvm/test/CodeGen/RISCV/rvv/select-fp.ll --- a/llvm/test/CodeGen/RISCV/rvv/select-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/select-fp.ll @@ -7,10 +7,10 @@ define @select_nxv1f16(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b @@ -21,10 +21,10 @@ ; CHECK-LABEL: selectcc_nxv1f16: ; CHECK: # %bb.0: ; CHECK-NEXT: feq.h a0, fa0, fa1 -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %cmp = fcmp oeq half %a, %b @@ -35,10 +35,10 @@ define @select_nxv2f16(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b @@ -49,10 +49,10 @@ ; CHECK-LABEL: selectcc_nxv2f16: ; CHECK: # %bb.0: ; CHECK-NEXT: feq.h a0, fa0, fa1 -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %cmp = fcmp oeq half %a, %b @@ -63,10 +63,10 @@ define @select_nxv4f16(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, 
mf2, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b @@ -77,10 +77,10 @@ ; CHECK-LABEL: selectcc_nxv4f16: ; CHECK: # %bb.0: ; CHECK-NEXT: feq.h a0, fa0, fa1 -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %cmp = fcmp oeq half %a, %b @@ -91,10 +91,10 @@ define @select_nxv8f16(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsne.vi v0, v12, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b @@ -105,10 +105,10 @@ ; CHECK-LABEL: selectcc_nxv8f16: ; CHECK: # %bb.0: ; CHECK-NEXT: feq.h a0, fa0, fa1 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsne.vi v0, v12, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %cmp = fcmp oeq half %a, %b @@ -119,10 +119,10 @@ define @select_nxv16f16(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vmsne.vi v0, v16, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b @@ -133,10 +133,10 @@ ; CHECK-LABEL: selectcc_nxv16f16: ; CHECK: # %bb.0: ; CHECK-NEXT: feq.h a0, fa0, fa1 -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vmsne.vi v0, v16, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %cmp = fcmp oeq half %a, %b @@ -147,10 +147,10 @@ define @select_nxv32f16(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vmv.v.x v24, a0 ; CHECK-NEXT: vmsne.vi v0, v24, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b @@ -161,10 +161,10 @@ ; CHECK-LABEL: selectcc_nxv32f16: ; CHECK: # %bb.0: ; CHECK-NEXT: feq.h a0, fa0, fa1 -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vmv.v.x v24, a0 ; CHECK-NEXT: vmsne.vi v0, v24, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 ; CHECK-NEXT: ret %cmp = fcmp oeq half %a, %b @@ -175,10 +175,10 @@ define @select_nxv1f32(i1 zeroext %c, %a, %b) { ; 
CHECK-LABEL: select_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b @@ -189,10 +189,10 @@ ; CHECK-LABEL: selectcc_nxv1f32: ; CHECK: # %bb.0: ; CHECK-NEXT: feq.s a0, fa0, fa1 -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %cmp = fcmp oeq float %a, %b @@ -203,10 +203,10 @@ define @select_nxv2f32(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b @@ -217,10 +217,10 @@ ; CHECK-LABEL: selectcc_nxv2f32: ; CHECK: # %bb.0: ; CHECK-NEXT: feq.s a0, fa0, fa1 -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %cmp = fcmp oeq float %a, %b @@ -231,10 +231,10 @@ define @select_nxv4f32(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsne.vi v0, v12, 0 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b @@ -245,10 +245,10 @@ ; CHECK-LABEL: selectcc_nxv4f32: ; CHECK: # %bb.0: ; CHECK-NEXT: feq.s a0, fa0, fa1 -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsne.vi v0, v12, 0 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %cmp = fcmp oeq float %a, %b @@ -259,10 +259,10 @@ define @select_nxv8f32(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vmsne.vi v0, v16, 0 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b @@ -273,10 +273,10 @@ ; CHECK-LABEL: selectcc_nxv8f32: ; CHECK: # %bb.0: ; CHECK-NEXT: feq.s a0, fa0, fa1 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vmsne.vi v0, v16, 0 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-NEXT: 
vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %cmp = fcmp oeq float %a, %b @@ -287,10 +287,10 @@ define @select_nxv16f32(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vmv.v.x v24, a0 ; CHECK-NEXT: vmsne.vi v0, v24, 0 -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b @@ -301,10 +301,10 @@ ; CHECK-LABEL: selectcc_nxv16f32: ; CHECK: # %bb.0: ; CHECK-NEXT: feq.s a0, fa0, fa1 -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vmv.v.x v24, a0 ; CHECK-NEXT: vmsne.vi v0, v24, 0 -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 ; CHECK-NEXT: ret %cmp = fcmp oeq float %a, %b @@ -315,10 +315,10 @@ define @select_nxv1f64(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b @@ -329,10 +329,10 @@ ; CHECK-LABEL: selectcc_nxv1f64: ; CHECK: # %bb.0: ; CHECK-NEXT: feq.d a0, fa0, fa1 -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %cmp = fcmp oeq double %a, %b @@ -343,10 +343,10 @@ define @select_nxv2f64(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsne.vi v0, v12, 0 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b @@ -357,10 +357,10 @@ ; CHECK-LABEL: selectcc_nxv2f64: ; CHECK: # %bb.0: ; CHECK-NEXT: feq.d a0, fa0, fa1 -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsne.vi v0, v12, 0 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %cmp = fcmp oeq double %a, %b @@ -371,10 +371,10 @@ define @select_nxv4f64(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vmsne.vi v0, v16, 0 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b @@ -385,10 +385,10 @@ ; CHECK-LABEL: selectcc_nxv4f64: ; CHECK: # %bb.0: ; CHECK-NEXT: feq.d a0, fa0, fa1 -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v16, a0 ; 
CHECK-NEXT: vmsne.vi v0, v16, 0 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %cmp = fcmp oeq double %a, %b @@ -399,10 +399,10 @@ define @select_nxv8f64(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v24, a0 ; CHECK-NEXT: vmsne.vi v0, v24, 0 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b @@ -413,10 +413,10 @@ ; CHECK-LABEL: selectcc_nxv8f64: ; CHECK: # %bb.0: ; CHECK-NEXT: feq.d a0, fa0, fa1 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v24, a0 ; CHECK-NEXT: vmsne.vi v0, v24, 0 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 ; CHECK-NEXT: ret %cmp = fcmp oeq double %a, %b diff --git a/llvm/test/CodeGen/RISCV/rvv/select-int.ll b/llvm/test/CodeGen/RISCV/rvv/select-int.ll --- a/llvm/test/CodeGen/RISCV/rvv/select-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/select-int.ll @@ -7,7 +7,7 @@ define @select_nxv1i1(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsne.vi v9, v9, 0 ; CHECK-NEXT: vmandn.mm v8, v8, v9 @@ -23,7 +23,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: andi a0, a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsne.vi v9, v9, 0 ; CHECK-NEXT: vmandn.mm v8, v8, v9 @@ -38,7 +38,7 @@ define @select_nxv2i1(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsne.vi v9, v9, 0 ; CHECK-NEXT: vmandn.mm v8, v8, v9 @@ -54,7 +54,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: andi a0, a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsne.vi v9, v9, 0 ; CHECK-NEXT: vmandn.mm v8, v8, v9 @@ -69,7 +69,7 @@ define @select_nxv4i1(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsne.vi v9, v9, 0 ; CHECK-NEXT: vmandn.mm v8, v8, v9 @@ -85,7 +85,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: andi a0, a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsne.vi v9, v9, 0 ; CHECK-NEXT: vmandn.mm v8, v8, v9 @@ -100,7 +100,7 @@ define @select_nxv8i1(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsne.vi v9, v9, 0 ; CHECK-NEXT: vmandn.mm v8, v8, v9 @@ -116,7 +116,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: andi a0, a0, 1 -; CHECK-NEXT: 
vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsne.vi v9, v9, 0 ; CHECK-NEXT: vmandn.mm v8, v8, v9 @@ -131,7 +131,7 @@ define @select_nxv16i1(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v9, v10, 0 ; CHECK-NEXT: vmandn.mm v8, v8, v9 @@ -147,7 +147,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: andi a0, a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v9, v10, 0 ; CHECK-NEXT: vmandn.mm v8, v8, v9 @@ -162,7 +162,7 @@ define @select_nxv32i1(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsne.vi v9, v12, 0 ; CHECK-NEXT: vmandn.mm v8, v8, v9 @@ -178,7 +178,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: andi a0, a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsne.vi v9, v12, 0 ; CHECK-NEXT: vmandn.mm v8, v8, v9 @@ -193,7 +193,7 @@ define @select_nxv64i1(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vmsne.vi v9, v16, 0 ; CHECK-NEXT: vmandn.mm v8, v8, v9 @@ -209,7 +209,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: andi a0, a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vmsne.vi v9, v16, 0 ; CHECK-NEXT: vmandn.mm v8, v8, v9 @@ -224,7 +224,7 @@ define @select_nxv1i8(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 @@ -238,7 +238,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: snez a0, a0 -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 @@ -251,7 +251,7 @@ define @select_nxv2i8(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 @@ -265,7 +265,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: snez a0, a0 -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 @@ -278,7 +278,7 @@ define @select_nxv4i8(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 @@ -292,7 +292,7 @@ ; 
CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: snez a0, a0 -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 @@ -305,7 +305,7 @@ define @select_nxv8i8(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 @@ -319,7 +319,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: snez a0, a0 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 @@ -332,7 +332,7 @@ define @select_nxv16i8(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsne.vi v0, v12, 0 ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 @@ -346,7 +346,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: snez a0, a0 -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsne.vi v0, v12, 0 ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 @@ -359,7 +359,7 @@ define @select_nxv32i8(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vmsne.vi v0, v16, 0 ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 @@ -373,7 +373,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: snez a0, a0 -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vmsne.vi v0, v16, 0 ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 @@ -386,7 +386,7 @@ define @select_nxv64i8(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vmv.v.x v24, a0 ; CHECK-NEXT: vmsne.vi v0, v24, 0 ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 @@ -400,7 +400,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: snez a0, a0 -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vmv.v.x v24, a0 ; CHECK-NEXT: vmsne.vi v0, v24, 0 ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 @@ -413,10 +413,10 @@ define @select_nxv1i16(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b @@ -428,10 +428,10 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: snez a0, a0 -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; 
CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %cmp = icmp ne i16 %a, %b @@ -442,10 +442,10 @@ define @select_nxv2i16(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b @@ -457,10 +457,10 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: snez a0, a0 -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %cmp = icmp ne i16 %a, %b @@ -471,10 +471,10 @@ define @select_nxv4i16(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b @@ -486,10 +486,10 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: snez a0, a0 -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %cmp = icmp ne i16 %a, %b @@ -500,10 +500,10 @@ define @select_nxv8i16(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsne.vi v0, v12, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b @@ -515,10 +515,10 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: snez a0, a0 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsne.vi v0, v12, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %cmp = icmp ne i16 %a, %b @@ -529,10 +529,10 @@ define @select_nxv16i16(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vmsne.vi v0, v16, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b @@ -544,10 +544,10 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: snez a0, a0 -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vmsne.vi v0, v16, 0 -; CHECK-NEXT: 
vsetvli zero, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %cmp = icmp ne i16 %a, %b @@ -558,10 +558,10 @@ define @select_nxv32i16(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vmv.v.x v24, a0 ; CHECK-NEXT: vmsne.vi v0, v24, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b @@ -573,10 +573,10 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: snez a0, a0 -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vmv.v.x v24, a0 ; CHECK-NEXT: vmsne.vi v0, v24, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 ; CHECK-NEXT: ret %cmp = icmp ne i16 %a, %b @@ -587,10 +587,10 @@ define @select_nxv1i32(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b @@ -602,10 +602,10 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: snez a0, a0 -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %cmp = icmp ne i32 %a, %b @@ -616,10 +616,10 @@ define @select_nxv2i32(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b @@ -631,10 +631,10 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: snez a0, a0 -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %cmp = icmp ne i32 %a, %b @@ -645,10 +645,10 @@ define @select_nxv4i32(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsne.vi v0, v12, 0 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b @@ -660,10 +660,10 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: snez a0, a0 -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, 
mf2, ta, ma ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsne.vi v0, v12, 0 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %cmp = icmp ne i32 %a, %b @@ -674,10 +674,10 @@ define @select_nxv8i32(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vmsne.vi v0, v16, 0 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b @@ -689,10 +689,10 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: snez a0, a0 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vmsne.vi v0, v16, 0 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %cmp = icmp ne i32 %a, %b @@ -703,10 +703,10 @@ define @select_nxv16i32(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vmv.v.x v24, a0 ; CHECK-NEXT: vmsne.vi v0, v24, 0 -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b @@ -718,10 +718,10 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: snez a0, a0 -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vmv.v.x v24, a0 ; CHECK-NEXT: vmsne.vi v0, v24, 0 -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 ; CHECK-NEXT: ret %cmp = icmp ne i32 %a, %b @@ -732,10 +732,10 @@ define @select_nxv1i64(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsne.vi v0, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b @@ -749,10 +749,10 @@ ; RV32-NEXT: xor a0, a0, a2 ; RV32-NEXT: or a0, a0, a1 ; RV32-NEXT: snez a0, a0 -; RV32-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; RV32-NEXT: vmv.v.x v10, a0 ; RV32-NEXT: vmsne.vi v0, v10, 0 -; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV32-NEXT: vmerge.vvm v8, v9, v8, v0 ; RV32-NEXT: ret ; @@ -760,10 +760,10 @@ ; RV64: # %bb.0: ; RV64-NEXT: xor a0, a0, a1 ; RV64-NEXT: snez a0, a0 -; RV64-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; RV64-NEXT: vmv.v.x v10, a0 ; RV64-NEXT: vmsne.vi v0, v10, 0 -; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV64-NEXT: vmerge.vvm v8, v9, v8, v0 ; RV64-NEXT: ret %cmp = icmp ne i64 %a, %b @@ -774,10 +774,10 @@ define @select_nxv2i64(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, 
mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsne.vi v0, v12, 0 -; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b @@ -791,10 +791,10 @@ ; RV32-NEXT: xor a0, a0, a2 ; RV32-NEXT: or a0, a0, a1 ; RV32-NEXT: snez a0, a0 -; RV32-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; RV32-NEXT: vmv.v.x v12, a0 ; RV32-NEXT: vmsne.vi v0, v12, 0 -; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; RV32-NEXT: vmerge.vvm v8, v10, v8, v0 ; RV32-NEXT: ret ; @@ -802,10 +802,10 @@ ; RV64: # %bb.0: ; RV64-NEXT: xor a0, a0, a1 ; RV64-NEXT: snez a0, a0 -; RV64-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; RV64-NEXT: vmv.v.x v12, a0 ; RV64-NEXT: vmsne.vi v0, v12, 0 -; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; RV64-NEXT: vmerge.vvm v8, v10, v8, v0 ; RV64-NEXT: ret %cmp = icmp ne i64 %a, %b @@ -816,10 +816,10 @@ define @select_nxv4i64(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vmsne.vi v0, v16, 0 -; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b @@ -833,10 +833,10 @@ ; RV32-NEXT: xor a0, a0, a2 ; RV32-NEXT: or a0, a0, a1 ; RV32-NEXT: snez a0, a0 -; RV32-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; RV32-NEXT: vmv.v.x v16, a0 ; RV32-NEXT: vmsne.vi v0, v16, 0 -; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; RV32-NEXT: vmerge.vvm v8, v12, v8, v0 ; RV32-NEXT: ret ; @@ -844,10 +844,10 @@ ; RV64: # %bb.0: ; RV64-NEXT: xor a0, a0, a1 ; RV64-NEXT: snez a0, a0 -; RV64-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; RV64-NEXT: vmv.v.x v16, a0 ; RV64-NEXT: vmsne.vi v0, v16, 0 -; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; RV64-NEXT: vmerge.vvm v8, v12, v8, v0 ; RV64-NEXT: ret %cmp = icmp ne i64 %a, %b @@ -858,10 +858,10 @@ define @select_nxv8i64(i1 zeroext %c, %a, %b) { ; CHECK-LABEL: select_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v24, a0 ; CHECK-NEXT: vmsne.vi v0, v24, 0 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 ; CHECK-NEXT: ret %v = select i1 %c, %a, %b @@ -875,10 +875,10 @@ ; RV32-NEXT: xor a0, a0, a2 ; RV32-NEXT: or a0, a0, a1 ; RV32-NEXT: snez a0, a0 -; RV32-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; RV32-NEXT: vmv.v.x v24, a0 ; RV32-NEXT: vmsne.vi v0, v24, 0 -; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; RV32-NEXT: vmerge.vvm v8, v16, v8, v0 ; RV32-NEXT: ret ; @@ -886,10 +886,10 @@ ; RV64: # %bb.0: ; RV64-NEXT: xor a0, a0, a1 ; RV64-NEXT: snez a0, a0 -; RV64-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; 
RV64-NEXT: vmv.v.x v24, a0 ; RV64-NEXT: vmsne.vi v0, v24, 0 -; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; RV64-NEXT: vmerge.vvm v8, v16, v8, v0 ; RV64-NEXT: ret %cmp = icmp ne i64 %a, %b diff --git a/llvm/test/CodeGen/RISCV/rvv/select-sra.ll b/llvm/test/CodeGen/RISCV/rvv/select-sra.ll --- a/llvm/test/CodeGen/RISCV/rvv/select-sra.ll +++ b/llvm/test/CodeGen/RISCV/rvv/select-sra.ll @@ -10,7 +10,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 284280 ; RV32-NEXT: addi a0, a0, 291 -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV32-NEXT: vmv.v.x v8, a0 ; RV32-NEXT: lui a0, 214376 ; RV32-NEXT: addi a0, a0, -2030 @@ -21,7 +21,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 284280 ; RV64-NEXT: addiw a0, a0, 291 -; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; RV64-NEXT: vmv.v.x v8, a0 ; RV64-NEXT: lui a0, 214376 ; RV64-NEXT: addiw a0, a0, -2030 diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll @@ -231,7 +231,7 @@ define @fcmp_ord_vf_nxv1f16( %va, half %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_ord_vf_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmfeq.vf v9, v9, fa0, v0.t @@ -247,7 +247,7 @@ define @fcmp_ord_vf_swap_nxv1f16( %va, half %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_ord_vf_swap_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmfeq.vf v9, v9, fa0, v0.t @@ -497,7 +497,7 @@ define @fcmp_uno_vf_nxv1f16( %va, half %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_uno_vf_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmfne.vf v9, v9, fa0, v0.t @@ -513,7 +513,7 @@ define @fcmp_uno_vf_swap_nxv1f16( %va, half %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_uno_vf_swap_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmfne.vf v9, v9, fa0, v0.t @@ -780,7 +780,7 @@ define @fcmp_ord_vf_nxv8f16( %va, half %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_ord_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmfeq.vf v12, v10, fa0, v0.t @@ -796,7 +796,7 @@ define @fcmp_ord_vf_swap_nxv8f16( %va, half %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_ord_vf_swap_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmfeq.vf v12, v10, fa0, v0.t @@ -1049,7 +1049,7 @@ define @fcmp_uno_vf_nxv8f16( %va, half %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_uno_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: 
vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmfne.vf v12, v10, fa0, v0.t @@ -1065,7 +1065,7 @@ define @fcmp_uno_vf_swap_nxv8f16( %va, half %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_uno_vf_swap_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmfne.vf v12, v10, fa0, v0.t @@ -1094,7 +1094,7 @@ ; CHECK-NEXT: li a4, 0 ; CHECK-NEXT: csrr a3, vlenb ; CHECK-NEXT: srli a1, a3, 1 -; CHECK-NEXT: vsetvli a5, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a5, zero, e8, m1, ta, ma ; CHECK-NEXT: slli a5, a3, 3 ; CHECK-NEXT: add a5, a0, a5 ; CHECK-NEXT: vl8re16.v v24, (a5) @@ -1118,7 +1118,7 @@ ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmfeq.vv v16, v24, v8, v0.t ; CHECK-NEXT: add a0, a1, a1 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vslideup.vx v16, v2, a1 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: csrr a0, vlenb @@ -1357,7 +1357,7 @@ define @fcmp_ord_vf_nxv1f64( %va, double %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_ord_vf_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmfeq.vf v9, v9, fa0, v0.t @@ -1373,7 +1373,7 @@ define @fcmp_ord_vf_swap_nxv1f64( %va, double %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_ord_vf_swap_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmfeq.vf v9, v9, fa0, v0.t @@ -1623,7 +1623,7 @@ define @fcmp_uno_vf_nxv1f64( %va, double %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_uno_vf_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmfne.vf v9, v9, fa0, v0.t @@ -1639,7 +1639,7 @@ define @fcmp_uno_vf_swap_nxv1f64( %va, double %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_uno_vf_swap_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmfne.vf v9, v9, fa0, v0.t @@ -1907,7 +1907,7 @@ define @fcmp_ord_vf_nxv8f64( %va, double %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_ord_vf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmfeq.vf v24, v16, fa0, v0.t @@ -1923,7 +1923,7 @@ define @fcmp_ord_vf_swap_nxv8f64( %va, double %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_ord_vf_swap_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmfeq.vf v24, v16, fa0, v0.t @@ -2176,7 +2176,7 @@ define @fcmp_uno_vf_nxv8f64( %va, double %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: 
fcmp_uno_vf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmfne.vf v24, v16, fa0, v0.t @@ -2192,7 +2192,7 @@ define @fcmp_uno_vf_swap_nxv8f64( %va, double %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: fcmp_uno_vf_swap_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmfne.vf v24, v16, fa0, v0.t @@ -2238,7 +2238,7 @@ ; CHECK-NEXT: mv t0, a7 ; CHECK-NEXT: .LBB171_2: ; CHECK-NEXT: li t1, 0 -; CHECK-NEXT: vsetvli t2, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli t2, zero, e8, mf4, ta, ma ; CHECK-NEXT: vl8re64.v v16, (a4) ; CHECK-NEXT: srli a4, a3, 2 ; CHECK-NEXT: sub t2, t0, a3 @@ -2248,7 +2248,7 @@ ; CHECK-NEXT: mv t1, t2 ; CHECK-NEXT: .LBB171_4: ; CHECK-NEXT: li t2, 24 -; CHECK-NEXT: vsetvli t3, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli t3, zero, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v1, v24, a4 ; CHECK-NEXT: vl8re64.v v8, (a2) ; CHECK-NEXT: csrr t3, vlenb @@ -2306,7 +2306,7 @@ ; CHECK-NEXT: addi a6, a6, 16 ; CHECK-NEXT: vs8r.v v8, (a6) # Unknown-size Folded Spill ; CHECK-NEXT: add a0, a0, a5 -; CHECK-NEXT: vsetvli zero, a7, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a7, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vx v17, v2, a1 ; CHECK-NEXT: mv a5, t1 ; CHECK-NEXT: bltu t1, a3, .LBB171_10 @@ -2314,7 +2314,7 @@ ; CHECK-NEXT: mv a5, a3 ; CHECK-NEXT: .LBB171_10: ; CHECK-NEXT: li a6, 0 -; CHECK-NEXT: vsetvli a7, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a7, zero, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vx v16, v1, a1 ; CHECK-NEXT: vl8re64.v v8, (a2) ; CHECK-NEXT: csrr a2, vlenb @@ -2340,7 +2340,7 @@ ; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmfeq.vv v18, v8, v24, v0.t ; CHECK-NEXT: add a0, a4, a1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: sub a0, t1, a3 ; CHECK-NEXT: vslideup.vx v17, v18, a4 ; CHECK-NEXT: bltu t1, a0, .LBB171_12 @@ -2360,7 +2360,7 @@ ; CHECK-NEXT: slli a0, a1, 1 ; CHECK-NEXT: add a0, a0, a1 ; CHECK-NEXT: add a1, a0, a1 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vx v17, v16, a0 ; CHECK-NEXT: vmv1r.v v0, v17 ; CHECK-NEXT: csrr a0, vlenb diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll --- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll @@ -10,7 +10,7 @@ define @fcmp_oeq_vv_nxv8f16( %va, %vb) { ; CHECK-LABEL: fcmp_oeq_vv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfeq.vv v0, v8, v10 ; CHECK-NEXT: ret %vc = fcmp oeq %va, %vb @@ -20,7 +20,7 @@ define @fcmp_oeq_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_oeq_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -32,7 +32,7 @@ define @fcmp_oeq_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_oeq_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 ; 
CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -44,7 +44,7 @@ define @fcmp_oeq_vv_nxv8f16_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_oeq_vv_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfeq.vv v0, v8, v10 ; CHECK-NEXT: ret %vc = fcmp oeq %va, %vb @@ -54,7 +54,7 @@ define @fcmp_oeq_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_oeq_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -66,7 +66,7 @@ define @fcmp_ogt_vv_nxv8f16( %va, %vb) { ; CHECK-LABEL: fcmp_ogt_vv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmflt.vv v0, v10, v8 ; CHECK-NEXT: ret %vc = fcmp ogt %va, %vb @@ -76,7 +76,7 @@ define @fcmp_ogt_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ogt_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -88,7 +88,7 @@ define @fcmp_ogt_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ogt_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmflt.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -100,7 +100,7 @@ define @fcmp_ogt_vv_nxv8f16_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_ogt_vv_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmflt.vv v0, v10, v8 ; CHECK-NEXT: ret %vc = fcmp ogt %va, %vb @@ -110,7 +110,7 @@ define @fcmp_ogt_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_ogt_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -122,7 +122,7 @@ define @fcmp_oge_vv_nxv8f16( %va, %vb) { ; CHECK-LABEL: fcmp_oge_vv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfle.vv v0, v10, v8 ; CHECK-NEXT: ret %vc = fcmp oge %va, %vb @@ -132,7 +132,7 @@ define @fcmp_oge_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_oge_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfge.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -144,7 +144,7 @@ define @fcmp_oge_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_oge_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfle.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -156,7 +156,7 @@ define @fcmp_oge_vv_nxv8f16_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_oge_vv_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfle.vv v0, v10, v8 ; CHECK-NEXT: ret %vc = fcmp oge %va, %vb @@ -166,7 +166,7 @@ define @fcmp_oge_vf_nxv8f16_nonans( %va, half %b) #0 { ; 
CHECK-LABEL: fcmp_oge_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfge.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -178,7 +178,7 @@ define @fcmp_olt_vv_nxv8f16( %va, %vb) { ; CHECK-LABEL: fcmp_olt_vv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmflt.vv v0, v8, v10 ; CHECK-NEXT: ret %vc = fcmp olt %va, %vb @@ -188,7 +188,7 @@ define @fcmp_olt_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_olt_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmflt.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -200,7 +200,7 @@ define @fcmp_olt_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_olt_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -212,7 +212,7 @@ define @fcmp_olt_vv_nxv8f16_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_olt_vv_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmflt.vv v0, v8, v10 ; CHECK-NEXT: ret %vc = fcmp olt %va, %vb @@ -222,7 +222,7 @@ define @fcmp_olt_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_olt_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmflt.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -234,7 +234,7 @@ define @fcmp_ole_vv_nxv8f16( %va, %vb) { ; CHECK-LABEL: fcmp_ole_vv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfle.vv v0, v8, v10 ; CHECK-NEXT: ret %vc = fcmp ole %va, %vb @@ -244,7 +244,7 @@ define @fcmp_ole_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ole_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfle.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -256,7 +256,7 @@ define @fcmp_ole_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ole_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfge.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -268,7 +268,7 @@ define @fcmp_ole_vv_nxv8f16_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_ole_vv_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfle.vv v0, v8, v10 ; CHECK-NEXT: ret %vc = fcmp ole %va, %vb @@ -278,7 +278,7 @@ define @fcmp_ole_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_ole_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfle.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -290,7 +290,7 @@ define @fcmp_one_vv_nxv8f16( %va, %vb) { ; CHECK-LABEL: fcmp_one_vv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: 
vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmflt.vv v12, v8, v10 ; CHECK-NEXT: vmflt.vv v13, v10, v8 ; CHECK-NEXT: vmor.mm v0, v13, v12 @@ -302,7 +302,7 @@ define @fcmp_one_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_one_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmflt.vf v10, v8, fa0 ; CHECK-NEXT: vmfgt.vf v11, v8, fa0 ; CHECK-NEXT: vmor.mm v0, v11, v10 @@ -316,7 +316,7 @@ define @fcmp_one_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_one_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfgt.vf v10, v8, fa0 ; CHECK-NEXT: vmflt.vf v11, v8, fa0 ; CHECK-NEXT: vmor.mm v0, v11, v10 @@ -330,7 +330,7 @@ define @fcmp_one_vv_nxv8f16_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_one_vv_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v10 ; CHECK-NEXT: ret %vc = fcmp one %va, %vb @@ -340,7 +340,7 @@ define @fcmp_one_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_one_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfne.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -352,7 +352,7 @@ define @fcmp_ord_vv_nxv8f16( %va, %vb) { ; CHECK-LABEL: fcmp_ord_vv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfeq.vv v12, v10, v10 ; CHECK-NEXT: vmfeq.vv v10, v8, v8 ; CHECK-NEXT: vmand.mm v0, v10, v12 @@ -364,7 +364,7 @@ define @fcmp_ord_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ord_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vmfeq.vf v12, v10, fa0 ; CHECK-NEXT: vmfeq.vv v10, v8, v8 @@ -379,7 +379,7 @@ define @fcmp_ord_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ord_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vmfeq.vf v12, v10, fa0 ; CHECK-NEXT: vmfeq.vv v10, v8, v8 @@ -394,7 +394,7 @@ define @fcmp_ord_vv_nxv8f16_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_ord_vv_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfeq.vv v12, v10, v10 ; CHECK-NEXT: vmfeq.vv v10, v8, v8 ; CHECK-NEXT: vmand.mm v0, v10, v12 @@ -406,7 +406,7 @@ define @fcmp_ord_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_ord_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vmfeq.vf v12, v10, fa0 ; CHECK-NEXT: vmfeq.vv v10, v8, v8 @@ -421,7 +421,7 @@ define @fcmp_ueq_vv_nxv8f16( %va, %vb) { ; CHECK-LABEL: fcmp_ueq_vv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmflt.vv v12, v8, v10 ; CHECK-NEXT: vmflt.vv v13, v10, v8 ; CHECK-NEXT: vmnor.mm v0, v13, v12 @@ -433,7 +433,7 @@ define @fcmp_ueq_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ueq_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, 
mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmflt.vf v10, v8, fa0 ; CHECK-NEXT: vmfgt.vf v11, v8, fa0 ; CHECK-NEXT: vmnor.mm v0, v11, v10 @@ -447,7 +447,7 @@ define @fcmp_ueq_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ueq_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfgt.vf v10, v8, fa0 ; CHECK-NEXT: vmflt.vf v11, v8, fa0 ; CHECK-NEXT: vmnor.mm v0, v11, v10 @@ -461,7 +461,7 @@ define @fcmp_ueq_vv_nxv8f16_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_ueq_vv_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfeq.vv v0, v8, v10 ; CHECK-NEXT: ret %vc = fcmp ueq %va, %vb @@ -471,7 +471,7 @@ define @fcmp_ueq_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_ueq_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -483,7 +483,7 @@ define @fcmp_ugt_vv_nxv8f16( %va, %vb) { ; CHECK-LABEL: fcmp_ugt_vv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfle.vv v12, v8, v10 ; CHECK-NEXT: vmnot.m v0, v12 ; CHECK-NEXT: ret @@ -494,7 +494,7 @@ define @fcmp_ugt_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ugt_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfle.vf v10, v8, fa0 ; CHECK-NEXT: vmnot.m v0, v10 ; CHECK-NEXT: ret @@ -507,7 +507,7 @@ define @fcmp_ugt_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ugt_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfge.vf v10, v8, fa0 ; CHECK-NEXT: vmnot.m v0, v10 ; CHECK-NEXT: ret @@ -520,7 +520,7 @@ define @fcmp_ugt_vv_nxv8f16_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_ugt_vv_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmflt.vv v0, v10, v8 ; CHECK-NEXT: ret %vc = fcmp ugt %va, %vb @@ -530,7 +530,7 @@ define @fcmp_ugt_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_ugt_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -542,7 +542,7 @@ define @fcmp_uge_vv_nxv8f16( %va, %vb) { ; CHECK-LABEL: fcmp_uge_vv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmflt.vv v12, v8, v10 ; CHECK-NEXT: vmnot.m v0, v12 ; CHECK-NEXT: ret @@ -553,7 +553,7 @@ define @fcmp_uge_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_uge_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmflt.vf v10, v8, fa0 ; CHECK-NEXT: vmnot.m v0, v10 ; CHECK-NEXT: ret @@ -566,7 +566,7 @@ define @fcmp_uge_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_uge_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfgt.vf v10, v8, fa0 ; CHECK-NEXT: vmnot.m v0, v10 ; 
CHECK-NEXT: ret @@ -579,7 +579,7 @@ define @fcmp_uge_vv_nxv8f16_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_uge_vv_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfle.vv v0, v10, v8 ; CHECK-NEXT: ret %vc = fcmp uge %va, %vb @@ -589,7 +589,7 @@ define @fcmp_uge_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_uge_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfge.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -601,7 +601,7 @@ define @fcmp_ult_vv_nxv8f16( %va, %vb) { ; CHECK-LABEL: fcmp_ult_vv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfle.vv v12, v10, v8 ; CHECK-NEXT: vmnot.m v0, v12 ; CHECK-NEXT: ret @@ -612,7 +612,7 @@ define @fcmp_ult_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ult_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfge.vf v10, v8, fa0 ; CHECK-NEXT: vmnot.m v0, v10 ; CHECK-NEXT: ret @@ -625,7 +625,7 @@ define @fcmp_ult_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ult_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfle.vf v10, v8, fa0 ; CHECK-NEXT: vmnot.m v0, v10 ; CHECK-NEXT: ret @@ -638,7 +638,7 @@ define @fcmp_ult_vv_nxv8f16_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_ult_vv_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmflt.vv v0, v8, v10 ; CHECK-NEXT: ret %vc = fcmp ult %va, %vb @@ -648,7 +648,7 @@ define @fcmp_ult_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_ult_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmflt.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -660,7 +660,7 @@ define @fcmp_ule_vv_nxv8f16( %va, %vb) { ; CHECK-LABEL: fcmp_ule_vv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmflt.vv v12, v10, v8 ; CHECK-NEXT: vmnot.m v0, v12 ; CHECK-NEXT: ret @@ -671,7 +671,7 @@ define @fcmp_ule_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ule_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfgt.vf v10, v8, fa0 ; CHECK-NEXT: vmnot.m v0, v10 ; CHECK-NEXT: ret @@ -684,7 +684,7 @@ define @fcmp_ule_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_ule_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmflt.vf v10, v8, fa0 ; CHECK-NEXT: vmnot.m v0, v10 ; CHECK-NEXT: ret @@ -697,7 +697,7 @@ define @fcmp_ule_vv_nxv8f16_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_ule_vv_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfle.vv v0, v8, v10 ; CHECK-NEXT: ret %vc = fcmp ule %va, %vb @@ -707,7 +707,7 @@ define @fcmp_ule_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_ule_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: 
vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfle.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -719,7 +719,7 @@ define @fcmp_une_vv_nxv8f16( %va, %vb) { ; CHECK-LABEL: fcmp_une_vv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v10 ; CHECK-NEXT: ret %vc = fcmp une %va, %vb @@ -729,7 +729,7 @@ define @fcmp_une_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_une_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfne.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -741,7 +741,7 @@ define @fcmp_une_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_une_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfne.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -753,7 +753,7 @@ define @fcmp_une_vv_nxv8f16_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_une_vv_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v10 ; CHECK-NEXT: ret %vc = fcmp une %va, %vb @@ -763,7 +763,7 @@ define @fcmp_une_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_une_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfne.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -775,7 +775,7 @@ define @fcmp_uno_vv_nxv8f16( %va, %vb) { ; CHECK-LABEL: fcmp_uno_vv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfne.vv v12, v10, v10 ; CHECK-NEXT: vmfne.vv v10, v8, v8 ; CHECK-NEXT: vmor.mm v0, v10, v12 @@ -787,7 +787,7 @@ define @fcmp_uno_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_uno_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vmfne.vf v12, v10, fa0 ; CHECK-NEXT: vmfne.vv v10, v8, v8 @@ -802,7 +802,7 @@ define @fcmp_uno_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: fcmp_uno_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vmfne.vf v12, v10, fa0 ; CHECK-NEXT: vmfne.vv v10, v8, v8 @@ -817,7 +817,7 @@ define @fcmp_uno_vv_nxv8f16_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_uno_vv_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmfne.vv v12, v10, v10 ; CHECK-NEXT: vmfne.vv v10, v8, v8 ; CHECK-NEXT: vmor.mm v0, v10, v12 @@ -829,7 +829,7 @@ define @fcmp_uno_vf_nxv8f16_nonans( %va, half %b) #0 { ; CHECK-LABEL: fcmp_uno_vf_nxv8f16_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfmv.v.f v10, fa0 ; CHECK-NEXT: vmfne.vf v12, v10, fa0 ; CHECK-NEXT: vmfne.vv v10, v8, v8 @@ -844,7 +844,7 @@ define @fcmp_oeq_vv_nxv8f32( %va, %vb) { ; CHECK-LABEL: fcmp_oeq_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; 
CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfeq.vv v0, v8, v12 ; CHECK-NEXT: ret %vc = fcmp oeq %va, %vb @@ -854,7 +854,7 @@ define @fcmp_oeq_vf_nxv8f32( %va, float %b) { ; CHECK-LABEL: fcmp_oeq_vf_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -866,7 +866,7 @@ define @fcmp_oeq_fv_nxv8f32( %va, float %b) { ; CHECK-LABEL: fcmp_oeq_fv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -878,7 +878,7 @@ define @fcmp_oeq_vv_nxv8f32_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_oeq_vv_nxv8f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfeq.vv v0, v8, v12 ; CHECK-NEXT: ret %vc = fcmp oeq %va, %vb @@ -888,7 +888,7 @@ define @fcmp_oeq_vf_nxv8f32_nonans( %va, float %b) #0 { ; CHECK-LABEL: fcmp_oeq_vf_nxv8f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -900,7 +900,7 @@ define @fcmp_ogt_vv_nxv8f32( %va, %vb) { ; CHECK-LABEL: fcmp_ogt_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmflt.vv v0, v12, v8 ; CHECK-NEXT: ret %vc = fcmp ogt %va, %vb @@ -910,7 +910,7 @@ define @fcmp_ogt_vf_nxv8f32( %va, float %b) { ; CHECK-LABEL: fcmp_ogt_vf_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -922,7 +922,7 @@ define @fcmp_ogt_fv_nxv8f32( %va, float %b) { ; CHECK-LABEL: fcmp_ogt_fv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmflt.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -934,7 +934,7 @@ define @fcmp_ogt_vv_nxv8f32_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_ogt_vv_nxv8f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmflt.vv v0, v12, v8 ; CHECK-NEXT: ret %vc = fcmp ogt %va, %vb @@ -944,7 +944,7 @@ define @fcmp_ogt_vf_nxv8f32_nonans( %va, float %b) #0 { ; CHECK-LABEL: fcmp_ogt_vf_nxv8f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -956,7 +956,7 @@ define @fcmp_oge_vv_nxv8f32( %va, %vb) { ; CHECK-LABEL: fcmp_oge_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfle.vv v0, v12, v8 ; CHECK-NEXT: ret %vc = fcmp oge %va, %vb @@ -966,7 +966,7 @@ define @fcmp_oge_vf_nxv8f32( %va, float %b) { ; CHECK-LABEL: fcmp_oge_vf_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfge.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, 
float %b, i32 0 @@ -978,7 +978,7 @@ define @fcmp_oge_fv_nxv8f32( %va, float %b) { ; CHECK-LABEL: fcmp_oge_fv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfle.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -990,7 +990,7 @@ define @fcmp_oge_vv_nxv8f32_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_oge_vv_nxv8f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfle.vv v0, v12, v8 ; CHECK-NEXT: ret %vc = fcmp oge %va, %vb @@ -1000,7 +1000,7 @@ define @fcmp_oge_vf_nxv8f32_nonans( %va, float %b) #0 { ; CHECK-LABEL: fcmp_oge_vf_nxv8f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfge.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -1012,7 +1012,7 @@ define @fcmp_olt_vv_nxv8f32( %va, %vb) { ; CHECK-LABEL: fcmp_olt_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmflt.vv v0, v8, v12 ; CHECK-NEXT: ret %vc = fcmp olt %va, %vb @@ -1022,7 +1022,7 @@ define @fcmp_olt_vf_nxv8f32( %va, float %b) { ; CHECK-LABEL: fcmp_olt_vf_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmflt.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -1034,7 +1034,7 @@ define @fcmp_olt_fv_nxv8f32( %va, float %b) { ; CHECK-LABEL: fcmp_olt_fv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -1046,7 +1046,7 @@ define @fcmp_olt_vv_nxv8f32_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_olt_vv_nxv8f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmflt.vv v0, v8, v12 ; CHECK-NEXT: ret %vc = fcmp olt %va, %vb @@ -1056,7 +1056,7 @@ define @fcmp_olt_vf_nxv8f32_nonans( %va, float %b) #0 { ; CHECK-LABEL: fcmp_olt_vf_nxv8f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmflt.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -1068,7 +1068,7 @@ define @fcmp_ole_vv_nxv8f32( %va, %vb) { ; CHECK-LABEL: fcmp_ole_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfle.vv v0, v8, v12 ; CHECK-NEXT: ret %vc = fcmp ole %va, %vb @@ -1078,7 +1078,7 @@ define @fcmp_ole_vf_nxv8f32( %va, float %b) { ; CHECK-LABEL: fcmp_ole_vf_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfle.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -1090,7 +1090,7 @@ define @fcmp_ole_fv_nxv8f32( %va, float %b) { ; CHECK-LABEL: fcmp_ole_fv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfge.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -1102,7 +1102,7 @@ define @fcmp_ole_vv_nxv8f32_nonans( %va, %vb) #0 { 
; CHECK-LABEL: fcmp_ole_vv_nxv8f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfle.vv v0, v8, v12 ; CHECK-NEXT: ret %vc = fcmp ole %va, %vb @@ -1112,7 +1112,7 @@ define @fcmp_ole_vf_nxv8f32_nonans( %va, float %b) #0 { ; CHECK-LABEL: fcmp_ole_vf_nxv8f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfle.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -1124,7 +1124,7 @@ define @fcmp_one_vv_nxv8f32( %va, %vb) { ; CHECK-LABEL: fcmp_one_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmflt.vv v16, v8, v12 ; CHECK-NEXT: vmflt.vv v17, v12, v8 ; CHECK-NEXT: vmor.mm v0, v17, v16 @@ -1136,7 +1136,7 @@ define @fcmp_one_vf_nxv8f32( %va, float %b) { ; CHECK-LABEL: fcmp_one_vf_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmflt.vf v12, v8, fa0 ; CHECK-NEXT: vmfgt.vf v13, v8, fa0 ; CHECK-NEXT: vmor.mm v0, v13, v12 @@ -1150,7 +1150,7 @@ define @fcmp_one_fv_nxv8f32( %va, float %b) { ; CHECK-LABEL: fcmp_one_fv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfgt.vf v12, v8, fa0 ; CHECK-NEXT: vmflt.vf v13, v8, fa0 ; CHECK-NEXT: vmor.mm v0, v13, v12 @@ -1164,7 +1164,7 @@ define @fcmp_one_vv_nxv8f32_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_one_vv_nxv8f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v12 ; CHECK-NEXT: ret %vc = fcmp one %va, %vb @@ -1174,7 +1174,7 @@ define @fcmp_one_vf_nxv8f32_nonans( %va, float %b) #0 { ; CHECK-LABEL: fcmp_one_vf_nxv8f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfne.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -1186,7 +1186,7 @@ define @fcmp_ord_vv_nxv8f32( %va, %vb) { ; CHECK-LABEL: fcmp_ord_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfeq.vv v16, v12, v12 ; CHECK-NEXT: vmfeq.vv v12, v8, v8 ; CHECK-NEXT: vmand.mm v0, v12, v16 @@ -1198,7 +1198,7 @@ define @fcmp_ord_vf_nxv8f32( %va, float %b) { ; CHECK-LABEL: fcmp_ord_vf_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vmfeq.vf v16, v12, fa0 ; CHECK-NEXT: vmfeq.vv v12, v8, v8 @@ -1213,7 +1213,7 @@ define @fcmp_ord_fv_nxv8f32( %va, float %b) { ; CHECK-LABEL: fcmp_ord_fv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vmfeq.vf v16, v12, fa0 ; CHECK-NEXT: vmfeq.vv v12, v8, v8 @@ -1228,7 +1228,7 @@ define @fcmp_ord_vv_nxv8f32_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_ord_vv_nxv8f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfeq.vv v16, v12, v12 ; CHECK-NEXT: vmfeq.vv v12, v8, v8 ; CHECK-NEXT: vmand.mm v0, v12, v16 @@ -1240,7 +1240,7 @@ define 
@fcmp_ord_vf_nxv8f32_nonans( %va, float %b) #0 { ; CHECK-LABEL: fcmp_ord_vf_nxv8f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vmfeq.vf v16, v12, fa0 ; CHECK-NEXT: vmfeq.vv v12, v8, v8 @@ -1255,7 +1255,7 @@ define @fcmp_ueq_vv_nxv8f32( %va, %vb) { ; CHECK-LABEL: fcmp_ueq_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmflt.vv v16, v8, v12 ; CHECK-NEXT: vmflt.vv v17, v12, v8 ; CHECK-NEXT: vmnor.mm v0, v17, v16 @@ -1267,7 +1267,7 @@ define @fcmp_ueq_vf_nxv8f32( %va, float %b) { ; CHECK-LABEL: fcmp_ueq_vf_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmflt.vf v12, v8, fa0 ; CHECK-NEXT: vmfgt.vf v13, v8, fa0 ; CHECK-NEXT: vmnor.mm v0, v13, v12 @@ -1281,7 +1281,7 @@ define @fcmp_ueq_fv_nxv8f32( %va, float %b) { ; CHECK-LABEL: fcmp_ueq_fv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfgt.vf v12, v8, fa0 ; CHECK-NEXT: vmflt.vf v13, v8, fa0 ; CHECK-NEXT: vmnor.mm v0, v13, v12 @@ -1295,7 +1295,7 @@ define @fcmp_ueq_vv_nxv8f32_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_ueq_vv_nxv8f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfeq.vv v0, v8, v12 ; CHECK-NEXT: ret %vc = fcmp ueq %va, %vb @@ -1305,7 +1305,7 @@ define @fcmp_ueq_vf_nxv8f32_nonans( %va, float %b) #0 { ; CHECK-LABEL: fcmp_ueq_vf_nxv8f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -1317,7 +1317,7 @@ define @fcmp_ugt_vv_nxv8f32( %va, %vb) { ; CHECK-LABEL: fcmp_ugt_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfle.vv v16, v8, v12 ; CHECK-NEXT: vmnot.m v0, v16 ; CHECK-NEXT: ret @@ -1328,7 +1328,7 @@ define @fcmp_ugt_vf_nxv8f32( %va, float %b) { ; CHECK-LABEL: fcmp_ugt_vf_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfle.vf v12, v8, fa0 ; CHECK-NEXT: vmnot.m v0, v12 ; CHECK-NEXT: ret @@ -1341,7 +1341,7 @@ define @fcmp_ugt_fv_nxv8f32( %va, float %b) { ; CHECK-LABEL: fcmp_ugt_fv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfge.vf v12, v8, fa0 ; CHECK-NEXT: vmnot.m v0, v12 ; CHECK-NEXT: ret @@ -1354,7 +1354,7 @@ define @fcmp_ugt_vv_nxv8f32_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_ugt_vv_nxv8f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmflt.vv v0, v12, v8 ; CHECK-NEXT: ret %vc = fcmp ugt %va, %vb @@ -1364,7 +1364,7 @@ define @fcmp_ugt_vf_nxv8f32_nonans( %va, float %b) #0 { ; CHECK-LABEL: fcmp_ugt_vf_nxv8f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -1376,7 +1376,7 @@ define @fcmp_uge_vv_nxv8f32( %va, %vb) { ; 
CHECK-LABEL: fcmp_uge_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmflt.vv v16, v8, v12 ; CHECK-NEXT: vmnot.m v0, v16 ; CHECK-NEXT: ret @@ -1387,7 +1387,7 @@ define @fcmp_uge_vf_nxv8f32( %va, float %b) { ; CHECK-LABEL: fcmp_uge_vf_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmflt.vf v12, v8, fa0 ; CHECK-NEXT: vmnot.m v0, v12 ; CHECK-NEXT: ret @@ -1400,7 +1400,7 @@ define @fcmp_uge_fv_nxv8f32( %va, float %b) { ; CHECK-LABEL: fcmp_uge_fv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfgt.vf v12, v8, fa0 ; CHECK-NEXT: vmnot.m v0, v12 ; CHECK-NEXT: ret @@ -1413,7 +1413,7 @@ define @fcmp_uge_vv_nxv8f32_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_uge_vv_nxv8f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfle.vv v0, v12, v8 ; CHECK-NEXT: ret %vc = fcmp uge %va, %vb @@ -1423,7 +1423,7 @@ define @fcmp_uge_vf_nxv8f32_nonans( %va, float %b) #0 { ; CHECK-LABEL: fcmp_uge_vf_nxv8f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfge.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -1435,7 +1435,7 @@ define @fcmp_ult_vv_nxv8f32( %va, %vb) { ; CHECK-LABEL: fcmp_ult_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfle.vv v16, v12, v8 ; CHECK-NEXT: vmnot.m v0, v16 ; CHECK-NEXT: ret @@ -1446,7 +1446,7 @@ define @fcmp_ult_vf_nxv8f32( %va, float %b) { ; CHECK-LABEL: fcmp_ult_vf_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfge.vf v12, v8, fa0 ; CHECK-NEXT: vmnot.m v0, v12 ; CHECK-NEXT: ret @@ -1459,7 +1459,7 @@ define @fcmp_ult_fv_nxv8f32( %va, float %b) { ; CHECK-LABEL: fcmp_ult_fv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfle.vf v12, v8, fa0 ; CHECK-NEXT: vmnot.m v0, v12 ; CHECK-NEXT: ret @@ -1472,7 +1472,7 @@ define @fcmp_ult_vv_nxv8f32_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_ult_vv_nxv8f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmflt.vv v0, v8, v12 ; CHECK-NEXT: ret %vc = fcmp ult %va, %vb @@ -1482,7 +1482,7 @@ define @fcmp_ult_vf_nxv8f32_nonans( %va, float %b) #0 { ; CHECK-LABEL: fcmp_ult_vf_nxv8f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmflt.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -1494,7 +1494,7 @@ define @fcmp_ule_vv_nxv8f32( %va, %vb) { ; CHECK-LABEL: fcmp_ule_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmflt.vv v16, v12, v8 ; CHECK-NEXT: vmnot.m v0, v16 ; CHECK-NEXT: ret @@ -1505,7 +1505,7 @@ define @fcmp_ule_vf_nxv8f32( %va, float %b) { ; CHECK-LABEL: fcmp_ule_vf_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, 
ma ; CHECK-NEXT: vmfgt.vf v12, v8, fa0 ; CHECK-NEXT: vmnot.m v0, v12 ; CHECK-NEXT: ret @@ -1518,7 +1518,7 @@ define @fcmp_ule_fv_nxv8f32( %va, float %b) { ; CHECK-LABEL: fcmp_ule_fv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmflt.vf v12, v8, fa0 ; CHECK-NEXT: vmnot.m v0, v12 ; CHECK-NEXT: ret @@ -1531,7 +1531,7 @@ define @fcmp_ule_vv_nxv8f32_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_ule_vv_nxv8f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfle.vv v0, v8, v12 ; CHECK-NEXT: ret %vc = fcmp ule %va, %vb @@ -1541,7 +1541,7 @@ define @fcmp_ule_vf_nxv8f32_nonans( %va, float %b) #0 { ; CHECK-LABEL: fcmp_ule_vf_nxv8f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfle.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -1553,7 +1553,7 @@ define @fcmp_une_vv_nxv8f32( %va, %vb) { ; CHECK-LABEL: fcmp_une_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v12 ; CHECK-NEXT: ret %vc = fcmp une %va, %vb @@ -1563,7 +1563,7 @@ define @fcmp_une_vf_nxv8f32( %va, float %b) { ; CHECK-LABEL: fcmp_une_vf_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfne.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -1575,7 +1575,7 @@ define @fcmp_une_fv_nxv8f32( %va, float %b) { ; CHECK-LABEL: fcmp_une_fv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfne.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -1587,7 +1587,7 @@ define @fcmp_une_vv_nxv8f32_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_une_vv_nxv8f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v12 ; CHECK-NEXT: ret %vc = fcmp une %va, %vb @@ -1597,7 +1597,7 @@ define @fcmp_une_vf_nxv8f32_nonans( %va, float %b) #0 { ; CHECK-LABEL: fcmp_une_vf_nxv8f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfne.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -1609,7 +1609,7 @@ define @fcmp_uno_vv_nxv8f32( %va, %vb) { ; CHECK-LABEL: fcmp_uno_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfne.vv v16, v12, v12 ; CHECK-NEXT: vmfne.vv v12, v8, v8 ; CHECK-NEXT: vmor.mm v0, v12, v16 @@ -1621,7 +1621,7 @@ define @fcmp_uno_vf_nxv8f32( %va, float %b) { ; CHECK-LABEL: fcmp_uno_vf_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vmfne.vf v16, v12, fa0 ; CHECK-NEXT: vmfne.vv v12, v8, v8 @@ -1636,7 +1636,7 @@ define @fcmp_uno_fv_nxv8f32( %va, float %b) { ; CHECK-LABEL: fcmp_uno_fv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vmfne.vf v16, v12, fa0 ; 
CHECK-NEXT: vmfne.vv v12, v8, v8 @@ -1651,7 +1651,7 @@ define @fcmp_uno_vv_nxv8f32_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_uno_vv_nxv8f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmfne.vv v16, v12, v12 ; CHECK-NEXT: vmfne.vv v12, v8, v8 ; CHECK-NEXT: vmor.mm v0, v12, v16 @@ -1663,7 +1663,7 @@ define @fcmp_uno_vf_nxv8f32_nonans( %va, float %b) #0 { ; CHECK-LABEL: fcmp_uno_vf_nxv8f32_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vmfne.vf v16, v12, fa0 ; CHECK-NEXT: vmfne.vv v12, v8, v8 @@ -1678,7 +1678,7 @@ define @fcmp_oeq_vv_nxv8f64( %va, %vb) { ; CHECK-LABEL: fcmp_oeq_vv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfeq.vv v0, v8, v16 ; CHECK-NEXT: ret %vc = fcmp oeq %va, %vb @@ -1688,7 +1688,7 @@ define @fcmp_oeq_vf_nxv8f64( %va, double %b) { ; CHECK-LABEL: fcmp_oeq_vf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -1700,7 +1700,7 @@ define @fcmp_oeq_fv_nxv8f64( %va, double %b) { ; CHECK-LABEL: fcmp_oeq_fv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -1712,7 +1712,7 @@ define @fcmp_oeq_vv_nxv8f64_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_oeq_vv_nxv8f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfeq.vv v0, v8, v16 ; CHECK-NEXT: ret %vc = fcmp oeq %va, %vb @@ -1722,7 +1722,7 @@ define @fcmp_oeq_vf_nxv8f64_nonans( %va, double %b) #0 { ; CHECK-LABEL: fcmp_oeq_vf_nxv8f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -1734,7 +1734,7 @@ define @fcmp_ogt_vv_nxv8f64( %va, %vb) { ; CHECK-LABEL: fcmp_ogt_vv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmflt.vv v0, v16, v8 ; CHECK-NEXT: ret %vc = fcmp ogt %va, %vb @@ -1744,7 +1744,7 @@ define @fcmp_ogt_vf_nxv8f64( %va, double %b) { ; CHECK-LABEL: fcmp_ogt_vf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -1756,7 +1756,7 @@ define @fcmp_ogt_fv_nxv8f64( %va, double %b) { ; CHECK-LABEL: fcmp_ogt_fv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmflt.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -1768,7 +1768,7 @@ define @fcmp_ogt_vv_nxv8f64_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_ogt_vv_nxv8f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmflt.vv v0, v16, v8 ; CHECK-NEXT: ret %vc = fcmp ogt %va, %vb @@ -1778,7 
+1778,7 @@ define @fcmp_ogt_vf_nxv8f64_nonans( %va, double %b) #0 { ; CHECK-LABEL: fcmp_ogt_vf_nxv8f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -1790,7 +1790,7 @@ define @fcmp_oge_vv_nxv8f64( %va, %vb) { ; CHECK-LABEL: fcmp_oge_vv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfle.vv v0, v16, v8 ; CHECK-NEXT: ret %vc = fcmp oge %va, %vb @@ -1800,7 +1800,7 @@ define @fcmp_oge_vf_nxv8f64( %va, double %b) { ; CHECK-LABEL: fcmp_oge_vf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfge.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -1812,7 +1812,7 @@ define @fcmp_oge_fv_nxv8f64( %va, double %b) { ; CHECK-LABEL: fcmp_oge_fv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfle.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -1824,7 +1824,7 @@ define @fcmp_oge_vv_nxv8f64_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_oge_vv_nxv8f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfle.vv v0, v16, v8 ; CHECK-NEXT: ret %vc = fcmp oge %va, %vb @@ -1834,7 +1834,7 @@ define @fcmp_oge_vf_nxv8f64_nonans( %va, double %b) #0 { ; CHECK-LABEL: fcmp_oge_vf_nxv8f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfge.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -1846,7 +1846,7 @@ define @fcmp_olt_vv_nxv8f64( %va, %vb) { ; CHECK-LABEL: fcmp_olt_vv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmflt.vv v0, v8, v16 ; CHECK-NEXT: ret %vc = fcmp olt %va, %vb @@ -1856,7 +1856,7 @@ define @fcmp_olt_vf_nxv8f64( %va, double %b) { ; CHECK-LABEL: fcmp_olt_vf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmflt.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -1868,7 +1868,7 @@ define @fcmp_olt_fv_nxv8f64( %va, double %b) { ; CHECK-LABEL: fcmp_olt_fv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -1880,7 +1880,7 @@ define @fcmp_olt_vv_nxv8f64_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_olt_vv_nxv8f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmflt.vv v0, v8, v16 ; CHECK-NEXT: ret %vc = fcmp olt %va, %vb @@ -1890,7 +1890,7 @@ define @fcmp_olt_vf_nxv8f64_nonans( %va, double %b) #0 { ; CHECK-LABEL: fcmp_olt_vf_nxv8f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmflt.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -1902,7 +1902,7 @@ define @fcmp_ole_vv_nxv8f64( %va, %vb) { ; 
CHECK-LABEL: fcmp_ole_vv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfle.vv v0, v8, v16 ; CHECK-NEXT: ret %vc = fcmp ole %va, %vb @@ -1912,7 +1912,7 @@ define @fcmp_ole_vf_nxv8f64( %va, double %b) { ; CHECK-LABEL: fcmp_ole_vf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfle.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -1924,7 +1924,7 @@ define @fcmp_ole_fv_nxv8f64( %va, double %b) { ; CHECK-LABEL: fcmp_ole_fv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfge.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -1936,7 +1936,7 @@ define @fcmp_ole_vv_nxv8f64_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_ole_vv_nxv8f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfle.vv v0, v8, v16 ; CHECK-NEXT: ret %vc = fcmp ole %va, %vb @@ -1946,7 +1946,7 @@ define @fcmp_ole_vf_nxv8f64_nonans( %va, double %b) #0 { ; CHECK-LABEL: fcmp_ole_vf_nxv8f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfle.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -1958,7 +1958,7 @@ define @fcmp_one_vv_nxv8f64( %va, %vb) { ; CHECK-LABEL: fcmp_one_vv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmflt.vv v24, v8, v16 ; CHECK-NEXT: vmflt.vv v25, v16, v8 ; CHECK-NEXT: vmor.mm v0, v25, v24 @@ -1970,7 +1970,7 @@ define @fcmp_one_vf_nxv8f64( %va, double %b) { ; CHECK-LABEL: fcmp_one_vf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmflt.vf v16, v8, fa0 ; CHECK-NEXT: vmfgt.vf v17, v8, fa0 ; CHECK-NEXT: vmor.mm v0, v17, v16 @@ -1984,7 +1984,7 @@ define @fcmp_one_fv_nxv8f64( %va, double %b) { ; CHECK-LABEL: fcmp_one_fv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfgt.vf v16, v8, fa0 ; CHECK-NEXT: vmflt.vf v17, v8, fa0 ; CHECK-NEXT: vmor.mm v0, v17, v16 @@ -1998,7 +1998,7 @@ define @fcmp_one_vv_nxv8f64_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_one_vv_nxv8f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v16 ; CHECK-NEXT: ret %vc = fcmp one %va, %vb @@ -2008,7 +2008,7 @@ define @fcmp_one_vf_nxv8f64_nonans( %va, double %b) #0 { ; CHECK-LABEL: fcmp_one_vf_nxv8f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfne.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -2020,7 +2020,7 @@ define @fcmp_ord_vv_nxv8f64( %va, %vb) { ; CHECK-LABEL: fcmp_ord_vv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfeq.vv v24, v16, v16 ; CHECK-NEXT: vmfeq.vv v16, v8, v8 ; CHECK-NEXT: vmand.mm v0, v16, v24 @@ -2032,7 +2032,7 @@ define @fcmp_ord_vf_nxv8f64( %va, double %b) { ; CHECK-LABEL: 
fcmp_ord_vf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vmfeq.vf v24, v16, fa0 ; CHECK-NEXT: vmfeq.vv v16, v8, v8 @@ -2047,7 +2047,7 @@ define @fcmp_ord_fv_nxv8f64( %va, double %b) { ; CHECK-LABEL: fcmp_ord_fv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vmfeq.vf v24, v16, fa0 ; CHECK-NEXT: vmfeq.vv v16, v8, v8 @@ -2062,7 +2062,7 @@ define @fcmp_ord_vv_nxv8f64_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_ord_vv_nxv8f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfeq.vv v24, v16, v16 ; CHECK-NEXT: vmfeq.vv v16, v8, v8 ; CHECK-NEXT: vmand.mm v0, v16, v24 @@ -2074,7 +2074,7 @@ define @fcmp_ord_vf_nxv8f64_nonans( %va, double %b) #0 { ; CHECK-LABEL: fcmp_ord_vf_nxv8f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vmfeq.vf v24, v16, fa0 ; CHECK-NEXT: vmfeq.vv v16, v8, v8 @@ -2089,7 +2089,7 @@ define @fcmp_ueq_vv_nxv8f64( %va, %vb) { ; CHECK-LABEL: fcmp_ueq_vv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmflt.vv v24, v8, v16 ; CHECK-NEXT: vmflt.vv v25, v16, v8 ; CHECK-NEXT: vmnor.mm v0, v25, v24 @@ -2101,7 +2101,7 @@ define @fcmp_ueq_vf_nxv8f64( %va, double %b) { ; CHECK-LABEL: fcmp_ueq_vf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmflt.vf v16, v8, fa0 ; CHECK-NEXT: vmfgt.vf v17, v8, fa0 ; CHECK-NEXT: vmnor.mm v0, v17, v16 @@ -2115,7 +2115,7 @@ define @fcmp_ueq_fv_nxv8f64( %va, double %b) { ; CHECK-LABEL: fcmp_ueq_fv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfgt.vf v16, v8, fa0 ; CHECK-NEXT: vmflt.vf v17, v8, fa0 ; CHECK-NEXT: vmnor.mm v0, v17, v16 @@ -2129,7 +2129,7 @@ define @fcmp_ueq_vv_nxv8f64_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_ueq_vv_nxv8f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfeq.vv v0, v8, v16 ; CHECK-NEXT: ret %vc = fcmp ueq %va, %vb @@ -2139,7 +2139,7 @@ define @fcmp_ueq_vf_nxv8f64_nonans( %va, double %b) #0 { ; CHECK-LABEL: fcmp_ueq_vf_nxv8f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -2151,7 +2151,7 @@ define @fcmp_ugt_vv_nxv8f64( %va, %vb) { ; CHECK-LABEL: fcmp_ugt_vv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfle.vv v24, v8, v16 ; CHECK-NEXT: vmnot.m v0, v24 ; CHECK-NEXT: ret @@ -2162,7 +2162,7 @@ define @fcmp_ugt_vf_nxv8f64( %va, double %b) { ; CHECK-LABEL: fcmp_ugt_vf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfle.vf v16, v8, fa0 ; CHECK-NEXT: vmnot.m v0, v16 ; CHECK-NEXT: ret @@ -2175,7 +2175,7 @@ define @fcmp_ugt_fv_nxv8f64( %va, double %b) { ; 
CHECK-LABEL: fcmp_ugt_fv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfge.vf v16, v8, fa0 ; CHECK-NEXT: vmnot.m v0, v16 ; CHECK-NEXT: ret @@ -2188,7 +2188,7 @@ define @fcmp_ugt_vv_nxv8f64_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_ugt_vv_nxv8f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmflt.vv v0, v16, v8 ; CHECK-NEXT: ret %vc = fcmp ugt %va, %vb @@ -2198,7 +2198,7 @@ define @fcmp_ugt_vf_nxv8f64_nonans( %va, double %b) #0 { ; CHECK-LABEL: fcmp_ugt_vf_nxv8f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -2210,7 +2210,7 @@ define @fcmp_uge_vv_nxv8f64( %va, %vb) { ; CHECK-LABEL: fcmp_uge_vv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmflt.vv v24, v8, v16 ; CHECK-NEXT: vmnot.m v0, v24 ; CHECK-NEXT: ret @@ -2221,7 +2221,7 @@ define @fcmp_uge_vf_nxv8f64( %va, double %b) { ; CHECK-LABEL: fcmp_uge_vf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmflt.vf v16, v8, fa0 ; CHECK-NEXT: vmnot.m v0, v16 ; CHECK-NEXT: ret @@ -2234,7 +2234,7 @@ define @fcmp_uge_fv_nxv8f64( %va, double %b) { ; CHECK-LABEL: fcmp_uge_fv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfgt.vf v16, v8, fa0 ; CHECK-NEXT: vmnot.m v0, v16 ; CHECK-NEXT: ret @@ -2247,7 +2247,7 @@ define @fcmp_uge_vv_nxv8f64_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_uge_vv_nxv8f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfle.vv v0, v16, v8 ; CHECK-NEXT: ret %vc = fcmp uge %va, %vb @@ -2257,7 +2257,7 @@ define @fcmp_uge_vf_nxv8f64_nonans( %va, double %b) #0 { ; CHECK-LABEL: fcmp_uge_vf_nxv8f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfge.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -2269,7 +2269,7 @@ define @fcmp_ult_vv_nxv8f64( %va, %vb) { ; CHECK-LABEL: fcmp_ult_vv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfle.vv v24, v16, v8 ; CHECK-NEXT: vmnot.m v0, v24 ; CHECK-NEXT: ret @@ -2280,7 +2280,7 @@ define @fcmp_ult_vf_nxv8f64( %va, double %b) { ; CHECK-LABEL: fcmp_ult_vf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfge.vf v16, v8, fa0 ; CHECK-NEXT: vmnot.m v0, v16 ; CHECK-NEXT: ret @@ -2293,7 +2293,7 @@ define @fcmp_ult_fv_nxv8f64( %va, double %b) { ; CHECK-LABEL: fcmp_ult_fv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfle.vf v16, v8, fa0 ; CHECK-NEXT: vmnot.m v0, v16 ; CHECK-NEXT: ret @@ -2306,7 +2306,7 @@ define @fcmp_ult_vv_nxv8f64_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_ult_vv_nxv8f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli 
a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmflt.vv v0, v8, v16 ; CHECK-NEXT: ret %vc = fcmp ult %va, %vb @@ -2316,7 +2316,7 @@ define @fcmp_ult_vf_nxv8f64_nonans( %va, double %b) #0 { ; CHECK-LABEL: fcmp_ult_vf_nxv8f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmflt.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -2328,7 +2328,7 @@ define @fcmp_ule_vv_nxv8f64( %va, %vb) { ; CHECK-LABEL: fcmp_ule_vv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmflt.vv v24, v16, v8 ; CHECK-NEXT: vmnot.m v0, v24 ; CHECK-NEXT: ret @@ -2339,7 +2339,7 @@ define @fcmp_ule_vf_nxv8f64( %va, double %b) { ; CHECK-LABEL: fcmp_ule_vf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfgt.vf v16, v8, fa0 ; CHECK-NEXT: vmnot.m v0, v16 ; CHECK-NEXT: ret @@ -2352,7 +2352,7 @@ define @fcmp_ule_fv_nxv8f64( %va, double %b) { ; CHECK-LABEL: fcmp_ule_fv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmflt.vf v16, v8, fa0 ; CHECK-NEXT: vmnot.m v0, v16 ; CHECK-NEXT: ret @@ -2365,7 +2365,7 @@ define @fcmp_ule_vv_nxv8f64_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_ule_vv_nxv8f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfle.vv v0, v8, v16 ; CHECK-NEXT: ret %vc = fcmp ule %va, %vb @@ -2375,7 +2375,7 @@ define @fcmp_ule_vf_nxv8f64_nonans( %va, double %b) #0 { ; CHECK-LABEL: fcmp_ule_vf_nxv8f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfle.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -2387,7 +2387,7 @@ define @fcmp_une_vv_nxv8f64( %va, %vb) { ; CHECK-LABEL: fcmp_une_vv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v16 ; CHECK-NEXT: ret %vc = fcmp une %va, %vb @@ -2397,7 +2397,7 @@ define @fcmp_une_vf_nxv8f64( %va, double %b) { ; CHECK-LABEL: fcmp_une_vf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfne.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -2409,7 +2409,7 @@ define @fcmp_une_fv_nxv8f64( %va, double %b) { ; CHECK-LABEL: fcmp_une_fv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfne.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -2421,7 +2421,7 @@ define @fcmp_une_vv_nxv8f64_nonans( %va, %vb) #0 { ; CHECK-LABEL: fcmp_une_vv_nxv8f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v16 ; CHECK-NEXT: ret %vc = fcmp une %va, %vb @@ -2431,7 +2431,7 @@ define @fcmp_une_vf_nxv8f64_nonans( %va, double %b) #0 { ; CHECK-LABEL: fcmp_une_vf_nxv8f64_nonans: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmfne.vf v0, v8, fa0 ; CHECK-NEXT: ret %head = insertelement 
poison, double %b, i32 0
@@ -2443,7 +2443,7 @@
define @fcmp_uno_vv_nxv8f64( %va, %vb) {
; CHECK-LABEL: fcmp_uno_vv_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT: vmfne.vv v24, v16, v16
; CHECK-NEXT: vmfne.vv v16, v8, v8
; CHECK-NEXT: vmor.mm v0, v16, v24
@@ -2455,7 +2455,7 @@
define @fcmp_uno_vf_nxv8f64( %va, double %b) {
; CHECK-LABEL: fcmp_uno_vf_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT: vfmv.v.f v16, fa0
; CHECK-NEXT: vmfne.vf v24, v16, fa0
; CHECK-NEXT: vmfne.vv v16, v8, v8
@@ -2470,7 +2470,7 @@
define @fcmp_uno_fv_nxv8f64( %va, double %b) {
; CHECK-LABEL: fcmp_uno_fv_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT: vfmv.v.f v16, fa0
; CHECK-NEXT: vmfne.vf v24, v16, fa0
; CHECK-NEXT: vmfne.vv v16, v8, v8
@@ -2485,7 +2485,7 @@
define @fcmp_uno_vv_nxv8f64_nonans( %va, %vb) #0 {
; CHECK-LABEL: fcmp_uno_vv_nxv8f64_nonans:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT: vmfne.vv v24, v16, v16
; CHECK-NEXT: vmfne.vv v16, v8, v8
; CHECK-NEXT: vmor.mm v0, v16, v24
@@ -2497,7 +2497,7 @@
define @fcmp_uno_vf_nxv8f64_nonans( %va, double %b) #0 {
; CHECK-LABEL: fcmp_uno_vf_nxv8f64_nonans:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT: vfmv.v.f v16, fa0
; CHECK-NEXT: vmfne.vf v24, v16, fa0
; CHECK-NEXT: vmfne.vv v16, v8, v8
@@ -2515,26 +2515,26 @@
; RV32-LABEL: fcmp_oeq_vf_nx16f64:
; RV32: # %bb.0:
; RV32-NEXT: fcvt.d.w ft0, zero
-; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
+; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, ma
; RV32-NEXT: vmfeq.vf v24, v16, ft0
; RV32-NEXT: vmfeq.vf v0, v8, ft0
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: srli a0, a0, 3
; RV32-NEXT: add a1, a0, a0
-; RV32-NEXT: vsetvli zero, a1, e8, mf4, tu, mu
+; RV32-NEXT: vsetvli zero, a1, e8, mf4, tu, ma
; RV32-NEXT: vslideup.vx v0, v24, a0
; RV32-NEXT: ret
;
; RV64-LABEL: fcmp_oeq_vf_nx16f64:
; RV64: # %bb.0:
; RV64-NEXT: fmv.d.x ft0, zero
-; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu
+; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, ma
; RV64-NEXT: vmfeq.vf v24, v16, ft0
; RV64-NEXT: vmfeq.vf v0, v8, ft0
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: srli a0, a0, 3
; RV64-NEXT: add a1, a0, a0
-; RV64-NEXT: vsetvli zero, a1, e8, mf4, tu, mu
+; RV64-NEXT: vsetvli zero, a1, e8, mf4, tu, ma
; RV64-NEXT: vslideup.vx v0, v24, a0
; RV64-NEXT: ret
%vc = fcmp oeq %va, zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp-mask.ll
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp-mask.ll
@@ -9,7 +9,7 @@
define @icmp_eq_vv_nxv1i1( %va, %vb, %m, i32 zeroext %evl) {
; CHECK-LABEL: icmp_eq_vv_nxv1i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmxnor.mm v0, v0, v8
; CHECK-NEXT: ret
%v = call @llvm.vp.icmp.nxv1i1( %va, %vb, metadata !"eq", %m, i32 %evl)
@@ -21,7 +21,7 @@
define @icmp_eq_vv_nxv2i1( %va, %vb, %m, i32 zeroext %evl) {
; CHECK-LABEL: icmp_eq_vv_nxv2i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vmxnor.mm v0, v0, v8
;
CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv2i1( %va, %vb, metadata !"eq", %m, i32 %evl) @@ -33,7 +33,7 @@ define @icmp_eq_vv_nxv4i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv4i1( %va, %vb, metadata !"eq", %m, i32 %evl) @@ -45,7 +45,7 @@ define @icmp_eq_vv_nxv8i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv8i1( %va, %vb, metadata !"eq", %m, i32 %evl) @@ -57,7 +57,7 @@ define @icmp_eq_vv_nxv16i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv16i1( %va, %vb, metadata !"eq", %m, i32 %evl) @@ -69,7 +69,7 @@ define @icmp_eq_vv_nxv32i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_nxv32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv32i1( %va, %vb, metadata !"eq", %m, i32 %evl) @@ -81,7 +81,7 @@ define @icmp_eq_vv_nxv64i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_eq_vv_nxv64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv64i1( %va, %vb, metadata !"eq", %m, i32 %evl) @@ -91,7 +91,7 @@ define @icmp_ne_vv_nxv1i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ne_vv_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv1i1( %va, %vb, metadata !"ne", %m, i32 %evl) @@ -101,7 +101,7 @@ define @icmp_ne_vv_nxv2i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ne_vv_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv2i1( %va, %vb, metadata !"ne", %m, i32 %evl) @@ -111,7 +111,7 @@ define @icmp_ne_vv_nxv4i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ne_vv_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv4i1( %va, %vb, metadata !"ne", %m, i32 %evl) @@ -121,7 +121,7 @@ define @icmp_ne_vv_nxv8i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ne_vv_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv8i1( %va, %vb, metadata !"ne", %m, i32 %evl) @@ -131,7 +131,7 @@ define @icmp_ne_vv_nxv16i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ne_vv_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: 
ret %v = call @llvm.vp.icmp.nxv16i1( %va, %vb, metadata !"ne", %m, i32 %evl) @@ -141,7 +141,7 @@ define @icmp_ne_vv_nxv32i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ne_vv_nxv32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv32i1( %va, %vb, metadata !"ne", %m, i32 %evl) @@ -151,7 +151,7 @@ define @icmp_ne_vv_nxv64i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ne_vv_nxv64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv64i1( %va, %vb, metadata !"ne", %m, i32 %evl) @@ -161,7 +161,7 @@ define @icmp_slt_vv_nxv1i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_slt_vv_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv1i1( %va, %vb, metadata !"slt", %m, i32 %evl) @@ -171,7 +171,7 @@ define @icmp_slt_vv_nxv2i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_slt_vv_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv2i1( %va, %vb, metadata !"slt", %m, i32 %evl) @@ -181,7 +181,7 @@ define @icmp_slt_vv_nxv4i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_slt_vv_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv4i1( %va, %vb, metadata !"slt", %m, i32 %evl) @@ -191,7 +191,7 @@ define @icmp_slt_vv_nxv8i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_slt_vv_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv8i1( %va, %vb, metadata !"slt", %m, i32 %evl) @@ -201,7 +201,7 @@ define @icmp_slt_vv_nxv16i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_slt_vv_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv16i1( %va, %vb, metadata !"slt", %m, i32 %evl) @@ -211,7 +211,7 @@ define @icmp_slt_vv_nxv32i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_slt_vv_nxv32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv32i1( %va, %vb, metadata !"slt", %m, i32 %evl) @@ -221,7 +221,7 @@ define @icmp_slt_vv_nxv64i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_slt_vv_nxv64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv64i1( %va, %vb, metadata !"slt", %m, i32 %evl) @@ -231,7 +231,7 @@ define @icmp_ult_vv_nxv1i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ult_vv_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: 
vmandn.mm v0, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv1i1( %va, %vb, metadata !"ult", %m, i32 %evl) @@ -241,7 +241,7 @@ define @icmp_ult_vv_nxv2i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ult_vv_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmandn.mm v0, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv2i1( %va, %vb, metadata !"ult", %m, i32 %evl) @@ -251,7 +251,7 @@ define @icmp_ult_vv_nxv4i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ult_vv_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmandn.mm v0, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv4i1( %va, %vb, metadata !"ult", %m, i32 %evl) @@ -261,7 +261,7 @@ define @icmp_ult_vv_nxv8i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ult_vv_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmandn.mm v0, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv8i1( %va, %vb, metadata !"ult", %m, i32 %evl) @@ -271,7 +271,7 @@ define @icmp_ult_vv_nxv16i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ult_vv_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmandn.mm v0, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv16i1( %va, %vb, metadata !"ult", %m, i32 %evl) @@ -281,7 +281,7 @@ define @icmp_ult_vv_nxv32i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ult_vv_nxv32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmandn.mm v0, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv32i1( %va, %vb, metadata !"ult", %m, i32 %evl) @@ -291,7 +291,7 @@ define @icmp_ult_vv_nxv64i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ult_vv_nxv64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmandn.mm v0, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv64i1( %va, %vb, metadata !"ult", %m, i32 %evl) @@ -301,7 +301,7 @@ define @icmp_sgt_vv_nxv1i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sgt_vv_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmandn.mm v0, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv1i1( %va, %vb, metadata !"sgt", %m, i32 %evl) @@ -311,7 +311,7 @@ define @icmp_sgt_vv_nxv2i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sgt_vv_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmandn.mm v0, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv2i1( %va, %vb, metadata !"sgt", %m, i32 %evl) @@ -321,7 +321,7 @@ define @icmp_sgt_vv_nxv4i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sgt_vv_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmandn.mm v0, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv4i1( %va, %vb, metadata !"sgt", %m, i32 %evl) @@ -331,7 +331,7 @@ define @icmp_sgt_vv_nxv8i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sgt_vv_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli 
zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmandn.mm v0, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv8i1( %va, %vb, metadata !"sgt", %m, i32 %evl) @@ -341,7 +341,7 @@ define @icmp_sgt_vv_nxv16i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sgt_vv_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmandn.mm v0, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv16i1( %va, %vb, metadata !"sgt", %m, i32 %evl) @@ -351,7 +351,7 @@ define @icmp_sgt_vv_nxv32i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sgt_vv_nxv32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmandn.mm v0, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv32i1( %va, %vb, metadata !"sgt", %m, i32 %evl) @@ -361,7 +361,7 @@ define @icmp_sgt_vv_nxv64i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sgt_vv_nxv64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmandn.mm v0, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv64i1( %va, %vb, metadata !"sgt", %m, i32 %evl) @@ -371,7 +371,7 @@ define @icmp_ugt_vv_nxv1i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ugt_vv_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv1i1( %va, %vb, metadata !"ugt", %m, i32 %evl) @@ -381,7 +381,7 @@ define @icmp_ugt_vv_nxv2i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ugt_vv_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv2i1( %va, %vb, metadata !"ugt", %m, i32 %evl) @@ -391,7 +391,7 @@ define @icmp_ugt_vv_nxv4i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ugt_vv_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv4i1( %va, %vb, metadata !"ugt", %m, i32 %evl) @@ -401,7 +401,7 @@ define @icmp_ugt_vv_nxv8i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ugt_vv_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv8i1( %va, %vb, metadata !"ugt", %m, i32 %evl) @@ -411,7 +411,7 @@ define @icmp_ugt_vv_nxv16i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ugt_vv_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv16i1( %va, %vb, metadata !"ugt", %m, i32 %evl) @@ -421,7 +421,7 @@ define @icmp_ugt_vv_nxv32i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ugt_vv_nxv32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv32i1( %va, %vb, metadata !"ugt", %m, i32 %evl) @@ -431,7 +431,7 @@ define @icmp_ugt_vv_nxv64i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ugt_vv_nxv64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, 
e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv64i1( %va, %vb, metadata !"ugt", %m, i32 %evl) @@ -441,7 +441,7 @@ define @icmp_sle_vv_nxv1i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sle_vv_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv1i1( %va, %vb, metadata !"sle", %m, i32 %evl) @@ -451,7 +451,7 @@ define @icmp_sle_vv_nxv2i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sle_vv_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv2i1( %va, %vb, metadata !"sle", %m, i32 %evl) @@ -461,7 +461,7 @@ define @icmp_sle_vv_nxv4i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sle_vv_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv4i1( %va, %vb, metadata !"sle", %m, i32 %evl) @@ -471,7 +471,7 @@ define @icmp_sle_vv_nxv8i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sle_vv_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv8i1( %va, %vb, metadata !"sle", %m, i32 %evl) @@ -481,7 +481,7 @@ define @icmp_sle_vv_nxv16i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sle_vv_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv16i1( %va, %vb, metadata !"sle", %m, i32 %evl) @@ -491,7 +491,7 @@ define @icmp_sle_vv_nxv32i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sle_vv_nxv32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv32i1( %va, %vb, metadata !"sle", %m, i32 %evl) @@ -501,7 +501,7 @@ define @icmp_sle_vv_nxv64i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sle_vv_nxv64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv64i1( %va, %vb, metadata !"sle", %m, i32 %evl) @@ -511,7 +511,7 @@ define @icmp_ule_vv_nxv1i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ule_vv_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv1i1( %va, %vb, metadata !"ule", %m, i32 %evl) @@ -521,7 +521,7 @@ define @icmp_ule_vv_nxv2i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ule_vv_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv2i1( %va, %vb, metadata !"ule", %m, i32 %evl) @@ -531,7 +531,7 @@ define @icmp_ule_vv_nxv4i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ule_vv_nxv4i1: ; CHECK: # 
%bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv4i1( %va, %vb, metadata !"ule", %m, i32 %evl) @@ -541,7 +541,7 @@ define @icmp_ule_vv_nxv8i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ule_vv_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv8i1( %va, %vb, metadata !"ule", %m, i32 %evl) @@ -551,7 +551,7 @@ define @icmp_ule_vv_nxv16i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ule_vv_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv16i1( %va, %vb, metadata !"ule", %m, i32 %evl) @@ -561,7 +561,7 @@ define @icmp_ule_vv_nxv32i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ule_vv_nxv32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv32i1( %va, %vb, metadata !"ule", %m, i32 %evl) @@ -571,7 +571,7 @@ define @icmp_ule_vv_nxv64i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_ule_vv_nxv64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv64i1( %va, %vb, metadata !"ule", %m, i32 %evl) @@ -581,7 +581,7 @@ define @icmp_sge_vv_nxv1i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sge_vv_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv1i1( %va, %vb, metadata !"sge", %m, i32 %evl) @@ -591,7 +591,7 @@ define @icmp_sge_vv_nxv2i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sge_vv_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv2i1( %va, %vb, metadata !"sge", %m, i32 %evl) @@ -601,7 +601,7 @@ define @icmp_sge_vv_nxv4i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sge_vv_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv4i1( %va, %vb, metadata !"sge", %m, i32 %evl) @@ -611,7 +611,7 @@ define @icmp_sge_vv_nxv8i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sge_vv_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv8i1( %va, %vb, metadata !"sge", %m, i32 %evl) @@ -621,7 +621,7 @@ define @icmp_sge_vv_nxv16i1( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sge_vv_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.icmp.nxv16i1( %va, %vb, metadata !"sge", %m, i32 %evl) @@ -631,7 +631,7 @@ define @icmp_sge_vv_nxv32i1( %va, %vb, %m, i32 zeroext %evl) { ; 
CHECK-LABEL: icmp_sge_vv_nxv32i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vmxnor.mm v0, v0, v8
; CHECK-NEXT: ret
%v = call @llvm.vp.icmp.nxv32i1( %va, %vb, metadata !"sge", %m, i32 %evl)
@@ -641,7 +641,7 @@
define @icmp_sge_vv_nxv64i1( %va, %vb, %m, i32 zeroext %evl) {
; CHECK-LABEL: icmp_sge_vv_nxv64i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vmxnor.mm v0, v0, v8
; CHECK-NEXT: ret
%v = call @llvm.vp.icmp.nxv64i1( %va, %vb, metadata !"sge", %m, i32 %evl)
@@ -651,7 +651,7 @@
define @icmp_uge_vv_nxv1i1( %va, %vb, %m, i32 zeroext %evl) {
; CHECK-LABEL: icmp_uge_vv_nxv1i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmxnor.mm v0, v8, v0
; CHECK-NEXT: ret
%v = call @llvm.vp.icmp.nxv1i1( %va, %vb, metadata !"uge", %m, i32 %evl)
@@ -661,7 +661,7 @@
define @icmp_uge_vv_nxv2i1( %va, %vb, %m, i32 zeroext %evl) {
; CHECK-LABEL: icmp_uge_vv_nxv2i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vmxnor.mm v0, v8, v0
; CHECK-NEXT: ret
%v = call @llvm.vp.icmp.nxv2i1( %va, %vb, metadata !"uge", %m, i32 %evl)
@@ -671,7 +671,7 @@
define @icmp_uge_vv_nxv4i1( %va, %vb, %m, i32 zeroext %evl) {
; CHECK-LABEL: icmp_uge_vv_nxv4i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmxnor.mm v0, v8, v0
; CHECK-NEXT: ret
%v = call @llvm.vp.icmp.nxv4i1( %va, %vb, metadata !"uge", %m, i32 %evl)
@@ -681,7 +681,7 @@
define @icmp_uge_vv_nxv8i1( %va, %vb, %m, i32 zeroext %evl) {
; CHECK-LABEL: icmp_uge_vv_nxv8i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vmxnor.mm v0, v8, v0
; CHECK-NEXT: ret
%v = call @llvm.vp.icmp.nxv8i1( %va, %vb, metadata !"uge", %m, i32 %evl)
@@ -691,7 +691,7 @@
define @icmp_uge_vv_nxv16i1( %va, %vb, %m, i32 zeroext %evl) {
; CHECK-LABEL: icmp_uge_vv_nxv16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vmxnor.mm v0, v8, v0
; CHECK-NEXT: ret
%v = call @llvm.vp.icmp.nxv16i1( %va, %vb, metadata !"uge", %m, i32 %evl)
@@ -701,7 +701,7 @@
define @icmp_uge_vv_nxv32i1( %va, %vb, %m, i32 zeroext %evl) {
; CHECK-LABEL: icmp_uge_vv_nxv32i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vmxnor.mm v0, v8, v0
; CHECK-NEXT: ret
%v = call @llvm.vp.icmp.nxv32i1( %va, %vb, metadata !"uge", %m, i32 %evl)
@@ -711,7 +711,7 @@
define @icmp_uge_vv_nxv64i1( %va, %vb, %m, i32 zeroext %evl) {
; CHECK-LABEL: icmp_uge_vv_nxv64i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vmxnor.mm v0, v8, v0
; CHECK-NEXT: ret
%v = call @llvm.vp.icmp.nxv64i1( %va, %vb, metadata !"uge", %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
@@ -196,7 +196,7 @@
define @icmp_uge_vx_nxv1i8( %va, i8 %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: icmp_uge_vx_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, mu
+; CHECK-NEXT:
vsetvli a2, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v9, v8, v0.t @@ -372,7 +372,7 @@ define @icmp_sge_vx_nxv1i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sge_vx_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmsle.vv v0, v9, v8, v0.t @@ -502,7 +502,7 @@ define @icmp_sle_vx_swap_nxv1i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sle_vx_swap_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmsle.vv v0, v9, v8, v0.t @@ -579,7 +579,7 @@ ; CHECK-LABEL: icmp_eq_vv_nxv8i7: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 127 -; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma ; CHECK-NEXT: vand.vx v9, v9, a1 ; CHECK-NEXT: vand.vx v8, v8, a1 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma @@ -593,7 +593,7 @@ ; CHECK-LABEL: icmp_eq_vx_nxv8i7: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 127 -; CHECK-NEXT: vsetvli a3, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e8, m1, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a2 ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vand.vx v9, v9, a2 @@ -610,7 +610,7 @@ ; CHECK-LABEL: icmp_eq_vx_swap_nxv8i7: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 127 -; CHECK-NEXT: vsetvli a3, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e8, m1, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a2 ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vand.vx v9, v9, a2 @@ -812,7 +812,7 @@ define @icmp_uge_vx_nxv8i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_uge_vx_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v9, v8, v0.t @@ -988,7 +988,7 @@ define @icmp_sge_vx_nxv8i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sge_vx_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmsle.vv v0, v9, v8, v0.t @@ -1118,7 +1118,7 @@ define @icmp_sle_vx_swap_nxv8i8( %va, i8 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sle_vx_swap_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmsle.vv v0, v9, v8, v0.t @@ -1173,7 +1173,7 @@ ; CHECK-NEXT: add a4, sp, a4 ; CHECK-NEXT: addi a4, a4, 16 ; CHECK-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill -; CHECK-NEXT: vsetvli a4, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a4, zero, e8, m8, ta, ma ; CHECK-NEXT: vlm.v v25, (a2) ; CHECK-NEXT: sub a4, a3, a1 ; CHECK-NEXT: vmv1r.v v24, v0 @@ -1235,7 +1235,7 @@ ; CHECK-NEXT: mv a4, a3 ; CHECK-NEXT: .LBB97_2: ; CHECK-NEXT: li a5, 0 -; CHECK-NEXT: vsetvli a6, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a6, zero, e8, m8, ta, ma ; CHECK-NEXT: vlm.v v24, (a1) ; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, ma ; CHECK-NEXT: sub a1, a2, a3 @@ -1266,7 +1266,7 @@ ; CHECK-NEXT: mv a4, a3 ; CHECK-NEXT: .LBB98_2: ; CHECK-NEXT: li a5, 0 -; CHECK-NEXT: vsetvli a6, 
zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a6, zero, e8, m8, ta, ma ; CHECK-NEXT: vlm.v v24, (a1) ; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, ma ; CHECK-NEXT: sub a1, a2, a3 @@ -1475,7 +1475,7 @@ define @icmp_uge_vx_nxv1i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_uge_vx_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v9, v8, v0.t @@ -1651,7 +1651,7 @@ define @icmp_sge_vx_nxv1i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sge_vx_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmsle.vv v0, v9, v8, v0.t @@ -1781,7 +1781,7 @@ define @icmp_sle_vx_swap_nxv1i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sle_vx_swap_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmsle.vv v0, v9, v8, v0.t @@ -2021,7 +2021,7 @@ define @icmp_uge_vx_nxv8i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_uge_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmsleu.vv v12, v16, v8, v0.t @@ -2212,7 +2212,7 @@ define @icmp_sge_vx_nxv8i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sge_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmsle.vv v12, v16, v8, v0.t @@ -2353,7 +2353,7 @@ define @icmp_sle_vx_swap_nxv8i32( %va, i32 %b, %m, i32 zeroext %evl) { ; CHECK-LABEL: icmp_sle_vx_swap_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmsle.vv v12, v16, v8, v0.t @@ -2407,7 +2407,7 @@ ; CHECK-NEXT: li a4, 0 ; CHECK-NEXT: csrr a3, vlenb ; CHECK-NEXT: srli a1, a3, 2 -; CHECK-NEXT: vsetvli a5, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a5, zero, e8, mf2, ta, ma ; CHECK-NEXT: slli a5, a3, 3 ; CHECK-NEXT: add a5, a0, a5 ; CHECK-NEXT: vl8re32.v v24, (a5) @@ -2431,7 +2431,7 @@ ; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: vmseq.vv v16, v24, v8, v0.t ; CHECK-NEXT: add a0, a1, a1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vx v16, v2, a1 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: csrr a0, vlenb @@ -2450,7 +2450,7 @@ ; CHECK-NEXT: li a4, 0 ; CHECK-NEXT: csrr a3, vlenb ; CHECK-NEXT: srli a2, a3, 2 -; CHECK-NEXT: vsetvli a5, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a5, zero, e8, mf2, ta, ma ; CHECK-NEXT: slli a3, a3, 1 ; CHECK-NEXT: sub a5, a1, a3 ; CHECK-NEXT: vslidedown.vx v0, v0, a2 @@ -2468,7 +2468,7 @@ ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vmseq.vx v16, v8, a0, v0.t ; CHECK-NEXT: add a0, a2, a2 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vx v16, v25, 
a2 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ -2485,7 +2485,7 @@ ; CHECK-NEXT: li a4, 0 ; CHECK-NEXT: csrr a3, vlenb ; CHECK-NEXT: srli a2, a3, 2 -; CHECK-NEXT: vsetvli a5, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a5, zero, e8, mf2, ta, ma ; CHECK-NEXT: slli a3, a3, 1 ; CHECK-NEXT: sub a5, a1, a3 ; CHECK-NEXT: vslidedown.vx v0, v0, a2 @@ -2503,7 +2503,7 @@ ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vmseq.vx v16, v8, a0, v0.t ; CHECK-NEXT: add a0, a2, a2 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vx v16, v25, a2 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ -2533,7 +2533,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmseq.vv v0, v8, v9, v0.t @@ -2559,7 +2559,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmseq.vv v0, v9, v8, v0.t @@ -2619,7 +2619,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmsne.vv v0, v8, v9, v0.t @@ -2645,7 +2645,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmsne.vv v0, v9, v8, v0.t @@ -2705,7 +2705,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmsltu.vv v0, v9, v8, v0.t @@ -2731,7 +2731,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmsltu.vv v0, v8, v9, v0.t @@ -2791,7 +2791,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmsleu.vv v0, v9, v8, v0.t @@ -2800,7 +2800,7 @@ ; ; RV64-LABEL: icmp_uge_vx_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, ma ; RV64-NEXT: vmv.v.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vmsleu.vv v0, v9, v8, v0.t @@ -2819,7 +2819,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmsleu.vv v0, v8, v9, v0.t @@ -2879,7 +2879,7 @@ ; 
RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmsltu.vv v0, v8, v9, v0.t @@ -2905,7 +2905,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmsltu.vv v0, v9, v8, v0.t @@ -2965,7 +2965,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmslt.vv v0, v9, v8, v0.t @@ -2991,7 +2991,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmslt.vv v0, v8, v9, v0.t @@ -3051,7 +3051,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmsle.vv v0, v9, v8, v0.t @@ -3060,7 +3060,7 @@ ; ; RV64-LABEL: icmp_sge_vx_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, ma ; RV64-NEXT: vmv.v.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vmsle.vv v0, v9, v8, v0.t @@ -3079,7 +3079,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmsle.vv v0, v8, v9, v0.t @@ -3139,7 +3139,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmslt.vv v0, v8, v9, v0.t @@ -3165,7 +3165,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmslt.vv v0, v9, v8, v0.t @@ -3225,7 +3225,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmsle.vv v0, v8, v9, v0.t @@ -3251,7 +3251,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmsle.vv v0, v9, v8, v0.t @@ -3260,7 +3260,7 @@ ; ; RV64-LABEL: icmp_sle_vx_swap_nxv1i64: ; 
RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, ma ; RV64-NEXT: vmv.v.x v9, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vmsle.vv v0, v9, v8, v0.t @@ -3316,7 +3316,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmseq.vv v16, v8, v24, v0.t @@ -3344,7 +3344,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmseq.vv v16, v24, v8, v0.t @@ -3409,7 +3409,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmsne.vv v16, v8, v24, v0.t @@ -3437,7 +3437,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmsne.vv v16, v24, v8, v0.t @@ -3502,7 +3502,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmsltu.vv v16, v24, v8, v0.t @@ -3530,7 +3530,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmsltu.vv v16, v8, v24, v0.t @@ -3595,7 +3595,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmsleu.vv v16, v24, v8, v0.t @@ -3605,7 +3605,7 @@ ; ; RV64-LABEL: icmp_uge_vx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vmv.v.x v24, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vmsleu.vv v16, v24, v8, v0.t @@ -3625,7 +3625,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmsleu.vv v16, v8, v24, v0.t @@ -3690,7 +3690,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmsltu.vv v16, v8, v24, v0.t @@ -3718,7 +3718,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; 
RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmsltu.vv v16, v24, v8, v0.t @@ -3783,7 +3783,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmslt.vv v16, v24, v8, v0.t @@ -3811,7 +3811,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmslt.vv v16, v8, v24, v0.t @@ -3876,7 +3876,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmsle.vv v16, v24, v8, v0.t @@ -3886,7 +3886,7 @@ ; ; RV64-LABEL: icmp_sge_vx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vmv.v.x v24, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vmsle.vv v16, v24, v8, v0.t @@ -3906,7 +3906,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmsle.vv v16, v8, v24, v0.t @@ -3971,7 +3971,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmslt.vv v16, v8, v24, v0.t @@ -3999,7 +3999,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmslt.vv v16, v24, v8, v0.t @@ -4064,7 +4064,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmsle.vv v16, v8, v24, v0.t @@ -4092,7 +4092,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmsle.vv v16, v24, v8, v0.t @@ -4102,7 +4102,7 @@ ; ; RV64-LABEL: icmp_sle_vx_swap_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vmv.v.x v24, a0 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vmsle.vv v16, v24, v8, v0.t diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll +++ b/llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll @@ -7,7 +7,7 @@ define @icmp_eq_vv_nxv3i8( %va, %vb) { ; CHECK-LABEL: icmp_eq_vv_nxv3i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, v9 ; CHECK-NEXT: ret %vc = icmp eq %va, %vb @@ -17,7 +17,7 @@ define @icmp_eq_vx_nxv3i8( %va, i8 %b) { ; CHECK-LABEL: icmp_eq_vx_nxv3i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -29,7 +29,7 @@ define @icmp_eq_xv_nxv3i8( %va, i8 %b) { ; CHECK-LABEL: icmp_eq_xv_nxv3i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -41,7 +41,7 @@ define @icmp_eq_vv_nxv8i8( %va, %vb) { ; CHECK-LABEL: icmp_eq_vv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, v9 ; CHECK-NEXT: ret %vc = icmp eq %va, %vb @@ -51,7 +51,7 @@ define @icmp_eq_vx_nxv8i8( %va, i8 %b) { ; CHECK-LABEL: icmp_eq_vx_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -63,7 +63,7 @@ define @icmp_eq_xv_nxv8i8( %va, i8 %b) { ; CHECK-LABEL: icmp_eq_xv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -75,7 +75,7 @@ define @icmp_eq_vi_nxv8i8_0( %va) { ; CHECK-LABEL: icmp_eq_vi_nxv8i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 0 ; CHECK-NEXT: ret %head = insertelement poison, i8 0, i32 0 @@ -87,7 +87,7 @@ define @icmp_eq_vi_nxv8i8_1( %va) { ; CHECK-LABEL: icmp_eq_vi_nxv8i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 5 ; CHECK-NEXT: ret %head = insertelement poison, i8 5, i32 0 @@ -99,7 +99,7 @@ define @icmp_eq_iv_nxv8i8_1( %va) { ; CHECK-LABEL: icmp_eq_iv_nxv8i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 5 ; CHECK-NEXT: ret %head = insertelement poison, i8 5, i32 0 @@ -111,7 +111,7 @@ define @icmp_ne_vv_nxv8i8( %va, %vb) { ; CHECK-LABEL: icmp_ne_vv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v9 ; CHECK-NEXT: ret %vc = icmp ne %va, %vb @@ -121,7 +121,7 @@ define @icmp_ne_vx_nxv8i8( %va, i8 %b) { ; CHECK-LABEL: icmp_ne_vx_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -133,7 +133,7 @@ define @icmp_ne_xv_nxv8i8( %va, i8 %b) { ; CHECK-LABEL: icmp_ne_xv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, 
ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -145,7 +145,7 @@ define @icmp_ne_vi_nxv8i8_0( %va) { ; CHECK-LABEL: icmp_ne_vi_nxv8i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 5 ; CHECK-NEXT: ret %head = insertelement poison, i8 5, i32 0 @@ -157,7 +157,7 @@ define @icmp_ugt_vv_nxv8i8( %va, %vb) { ; CHECK-LABEL: icmp_ugt_vv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v9, v8 ; CHECK-NEXT: ret %vc = icmp ugt %va, %vb @@ -167,7 +167,7 @@ define @icmp_ugt_vx_nxv8i8( %va, i8 %b) { ; CHECK-LABEL: icmp_ugt_vx_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -179,7 +179,7 @@ define @icmp_ugt_xv_nxv8i8( %va, i8 %b) { ; CHECK-LABEL: icmp_ugt_xv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -191,7 +191,7 @@ define @icmp_ugt_vi_nxv8i8_0( %va) { ; CHECK-LABEL: icmp_ugt_vi_nxv8i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 5 ; CHECK-NEXT: ret %head = insertelement poison, i8 5, i32 0 @@ -203,7 +203,7 @@ define @icmp_uge_vv_nxv8i8( %va, %vb) { ; CHECK-LABEL: icmp_uge_vv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: ret %vc = icmp uge %va, %vb @@ -213,7 +213,7 @@ define @icmp_uge_vx_nxv8i8( %va, i8 %b) { ; CHECK-LABEL: icmp_uge_vx_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: ret @@ -226,7 +226,7 @@ define @icmp_uge_xv_nxv8i8( %va, i8 %b) { ; CHECK-LABEL: icmp_uge_xv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -238,7 +238,7 @@ define @icmp_uge_vi_nxv8i8_0( %va) { ; CHECK-LABEL: icmp_uge_vi_nxv8i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, -16 ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: ret @@ -251,7 +251,7 @@ define @icmp_uge_vi_nxv8i8_1( %va) { ; CHECK-LABEL: icmp_uge_vi_nxv8i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 14 ; CHECK-NEXT: ret %head = insertelement poison, i8 15, i32 0 @@ -263,7 +263,7 @@ define @icmp_uge_iv_nxv8i8_1( %va) { ; CHECK-LABEL: icmp_uge_iv_nxv8i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i8 15, i32 0 @@ -275,7 +275,7 @@ define @icmp_uge_vi_nxv8i8_2( %va) { ; CHECK-LABEL: icmp_uge_vi_nxv8i8_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; 
CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: ret %head = insertelement poison, i8 0, i32 0 @@ -287,7 +287,7 @@ define @icmp_uge_vi_nxv8i8_3( %va) { ; CHECK-LABEL: icmp_uge_vi_nxv8i8_3: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 0 ; CHECK-NEXT: ret %head = insertelement poison, i8 1, i32 0 @@ -299,7 +299,7 @@ define @icmp_uge_vi_nxv8i8_4( %va) { ; CHECK-LABEL: icmp_uge_vi_nxv8i8_4: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement poison, i8 -15, i32 0 @@ -311,7 +311,7 @@ define @icmp_uge_vi_nxv8i8_5( %va) { ; CHECK-LABEL: icmp_uge_vi_nxv8i8_5: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i8 16, i32 0 @@ -324,9 +324,9 @@ define @icmp_uge_vi_nxv8i8_6( %va, iXLen %vl) { ; CHECK-LABEL: icmp_uge_vi_nxv8i8_6: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: ret %splat = call @llvm.riscv.vmv.v.x.nxv8i8.iXLen( undef, i8 0, iXLen %vl) @@ -337,7 +337,7 @@ define @icmp_ult_vv_nxv8i8( %va, %vb) { ; CHECK-LABEL: icmp_ult_vv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v9 ; CHECK-NEXT: ret %vc = icmp ult %va, %vb @@ -347,7 +347,7 @@ define @icmp_ult_vx_nxv8i8( %va, i8 %b) { ; CHECK-LABEL: icmp_ult_vx_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -359,7 +359,7 @@ define @icmp_ult_xv_nxv8i8( %va, i8 %b) { ; CHECK-LABEL: icmp_ult_xv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -372,7 +372,7 @@ ; CHECK-LABEL: icmp_ult_vi_nxv8i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -16 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 -16, i32 0 @@ -384,7 +384,7 @@ define @icmp_ult_vi_nxv8i8_1( %va) { ; CHECK-LABEL: icmp_ult_vi_nxv8i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement poison, i8 -15, i32 0 @@ -396,7 +396,7 @@ define @icmp_ult_iv_nxv8i8_1( %va) { ; CHECK-LABEL: icmp_ult_iv_nxv8i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, -15 ; CHECK-NEXT: ret %head = insertelement poison, i8 -15, i32 0 @@ -408,7 +408,7 @@ define @icmp_ult_vi_nxv8i8_2( %va) { ; CHECK-LABEL: icmp_ult_vi_nxv8i8_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmclr.m v0 ; 
CHECK-NEXT: ret %head = insertelement poison, i8 0, i32 0 @@ -420,7 +420,7 @@ define @icmp_ult_vi_nxv8i8_3( %va) { ; CHECK-LABEL: icmp_ult_vi_nxv8i8_3: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 0 ; CHECK-NEXT: ret %head = insertelement poison, i8 1, i32 0 @@ -432,7 +432,7 @@ define @icmp_ult_vi_nxv8i8_4( %va) { ; CHECK-LABEL: icmp_ult_vi_nxv8i8_4: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i8 16, i32 0 @@ -447,7 +447,7 @@ define @icmp_ult_vi_nxv8i8_5( %va, iXLen %vl) { ; CHECK-LABEL: icmp_ult_vi_nxv8i8_5: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, zero ; CHECK-NEXT: ret %splat = call @llvm.riscv.vmv.v.x.nxv8i8.iXLen( undef, i8 0, iXLen %vl) @@ -458,7 +458,7 @@ define @icmp_ule_vv_nxv8i8( %va, %vb) { ; CHECK-LABEL: icmp_ule_vv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v9 ; CHECK-NEXT: ret %vc = icmp ule %va, %vb @@ -468,7 +468,7 @@ define @icmp_ule_vx_nxv8i8( %va, i8 %b) { ; CHECK-LABEL: icmp_ule_vx_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -480,7 +480,7 @@ define @icmp_ule_xv_nxv8i8( %va, i8 %b) { ; CHECK-LABEL: icmp_ule_xv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: ret @@ -493,7 +493,7 @@ define @icmp_ule_vi_nxv8i8_0( %va) { ; CHECK-LABEL: icmp_ule_vi_nxv8i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 5 ; CHECK-NEXT: ret %head = insertelement poison, i8 5, i32 0 @@ -505,7 +505,7 @@ define @icmp_sgt_vv_nxv8i8( %va, %vb) { ; CHECK-LABEL: icmp_sgt_vv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmslt.vv v0, v9, v8 ; CHECK-NEXT: ret %vc = icmp sgt %va, %vb @@ -515,7 +515,7 @@ define @icmp_sgt_vx_nxv8i8( %va, i8 %b) { ; CHECK-LABEL: icmp_sgt_vx_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -527,7 +527,7 @@ define @icmp_sgt_xv_nxv8i8( %va, i8 %b) { ; CHECK-LABEL: icmp_sgt_xv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -539,7 +539,7 @@ define @icmp_sgt_vi_nxv8i8_0( %va) { ; CHECK-LABEL: icmp_sgt_vi_nxv8i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 5 ; CHECK-NEXT: ret %head = insertelement poison, i8 5, i32 0 @@ -551,7 +551,7 @@ define @icmp_sge_vv_nxv8i8( %va, %vb) { ; CHECK-LABEL: icmp_sge_vv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, 
mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsle.vv v0, v9, v8 ; CHECK-NEXT: ret %vc = icmp sge %va, %vb @@ -561,7 +561,7 @@ define @icmp_sge_vx_nxv8i8( %va, i8 %b) { ; CHECK-LABEL: icmp_sge_vx_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsle.vv v0, v9, v8 ; CHECK-NEXT: ret @@ -574,7 +574,7 @@ define @icmp_sge_xv_nxv8i8( %va, i8 %b) { ; CHECK-LABEL: icmp_sge_xv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -586,7 +586,7 @@ define @icmp_sge_vi_nxv8i8_0( %va) { ; CHECK-LABEL: icmp_sge_vi_nxv8i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, -16 ; CHECK-NEXT: vmsle.vv v0, v9, v8 ; CHECK-NEXT: ret @@ -599,7 +599,7 @@ define @icmp_sge_vi_nxv8i8_1( %va) { ; CHECK-LABEL: icmp_sge_vi_nxv8i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement poison, i8 -15, i32 0 @@ -611,7 +611,7 @@ define @icmp_sge_iv_nxv8i8_1( %va) { ; CHECK-LABEL: icmp_sge_iv_nxv8i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, -15 ; CHECK-NEXT: ret %head = insertelement poison, i8 -15, i32 0 @@ -623,7 +623,7 @@ define @icmp_sge_vi_nxv8i8_2( %va) { ; CHECK-LABEL: icmp_sge_vi_nxv8i8_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, -1 ; CHECK-NEXT: ret %head = insertelement poison, i8 0, i32 0 @@ -635,7 +635,7 @@ define @icmp_sge_vi_nxv8i8_3( %va) { ; CHECK-LABEL: icmp_sge_vi_nxv8i8_3: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i8 16, i32 0 @@ -647,7 +647,7 @@ define @icmp_slt_vv_nxv8i8( %va, %vb) { ; CHECK-LABEL: icmp_slt_vv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v9 ; CHECK-NEXT: ret %vc = icmp slt %va, %vb @@ -657,7 +657,7 @@ define @icmp_slt_vx_nxv8i8( %va, i8 %b) { ; CHECK-LABEL: icmp_slt_vx_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -669,7 +669,7 @@ define @icmp_slt_xv_nxv8i8( %va, i8 %b) { ; CHECK-LABEL: icmp_slt_xv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -682,7 +682,7 @@ ; CHECK-LABEL: icmp_slt_vi_nxv8i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -16 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 -16, i32 0 @@ -694,7 +694,7 @@ define @icmp_slt_vi_nxv8i8_1( %va) { ; CHECK-LABEL: icmp_slt_vi_nxv8i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli 
a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement poison, i8 -15, i32 0 @@ -706,7 +706,7 @@ define @icmp_slt_iv_nxv8i8_1( %va) { ; CHECK-LABEL: icmp_slt_iv_nxv8i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, -15 ; CHECK-NEXT: ret %head = insertelement poison, i8 -15, i32 0 @@ -718,7 +718,7 @@ define @icmp_slt_vi_nxv8i8_2( %va) { ; CHECK-LABEL: icmp_slt_vi_nxv8i8_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, zero ; CHECK-NEXT: ret %head = insertelement poison, i8 0, i32 0 @@ -730,7 +730,7 @@ define @icmp_slt_vi_nxv8i8_3( %va) { ; CHECK-LABEL: icmp_slt_vi_nxv8i8_3: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i8 16, i32 0 @@ -742,7 +742,7 @@ define @icmp_sle_vv_nxv8i8( %va, %vb) { ; CHECK-LABEL: icmp_sle_vv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v9 ; CHECK-NEXT: ret %vc = icmp sle %va, %vb @@ -752,7 +752,7 @@ define @icmp_sle_vx_nxv8i8( %va, i8 %b) { ; CHECK-LABEL: icmp_sle_vx_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -764,7 +764,7 @@ define @icmp_sle_xv_nxv8i8( %va, i8 %b) { ; CHECK-LABEL: icmp_sle_xv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vmsle.vv v0, v9, v8 ; CHECK-NEXT: ret @@ -777,7 +777,7 @@ define @icmp_sle_vi_nxv8i8_0( %va) { ; CHECK-LABEL: icmp_sle_vi_nxv8i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 5 ; CHECK-NEXT: ret %head = insertelement poison, i8 5, i32 0 @@ -789,7 +789,7 @@ define @icmp_eq_vv_nxv8i16( %va, %vb) { ; CHECK-LABEL: icmp_eq_vv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, v10 ; CHECK-NEXT: ret %vc = icmp eq %va, %vb @@ -799,7 +799,7 @@ define @icmp_eq_vx_nxv8i16( %va, i16 %b) { ; CHECK-LABEL: icmp_eq_vx_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -811,7 +811,7 @@ define @icmp_eq_xv_nxv8i16( %va, i16 %b) { ; CHECK-LABEL: icmp_eq_xv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -823,7 +823,7 @@ define @icmp_eq_vi_nxv8i16_0( %va) { ; CHECK-LABEL: icmp_eq_vi_nxv8i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 0 ; CHECK-NEXT: ret %head = insertelement poison, i16 0, i32 0 @@ -835,7 +835,7 @@ define @icmp_eq_vi_nxv8i16_1( %va) { ; CHECK-LABEL: 
icmp_eq_vi_nxv8i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 5 ; CHECK-NEXT: ret %head = insertelement poison, i16 5, i32 0 @@ -847,7 +847,7 @@ define @icmp_eq_iv_nxv8i16_1( %va) { ; CHECK-LABEL: icmp_eq_iv_nxv8i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 5 ; CHECK-NEXT: ret %head = insertelement poison, i16 5, i32 0 @@ -859,7 +859,7 @@ define @icmp_ne_vv_nxv8i16( %va, %vb) { ; CHECK-LABEL: icmp_ne_vv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v10 ; CHECK-NEXT: ret %vc = icmp ne %va, %vb @@ -869,7 +869,7 @@ define @icmp_ne_vx_nxv8i16( %va, i16 %b) { ; CHECK-LABEL: icmp_ne_vx_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -881,7 +881,7 @@ define @icmp_ne_xv_nxv8i16( %va, i16 %b) { ; CHECK-LABEL: icmp_ne_xv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -893,7 +893,7 @@ define @icmp_ne_vi_nxv8i16_0( %va) { ; CHECK-LABEL: icmp_ne_vi_nxv8i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 5 ; CHECK-NEXT: ret %head = insertelement poison, i16 5, i32 0 @@ -905,7 +905,7 @@ define @icmp_ugt_vv_nxv8i16( %va, %vb) { ; CHECK-LABEL: icmp_ugt_vv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v10, v8 ; CHECK-NEXT: ret %vc = icmp ugt %va, %vb @@ -915,7 +915,7 @@ define @icmp_ugt_vx_nxv8i16( %va, i16 %b) { ; CHECK-LABEL: icmp_ugt_vx_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -927,7 +927,7 @@ define @icmp_ugt_xv_nxv8i16( %va, i16 %b) { ; CHECK-LABEL: icmp_ugt_xv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -939,7 +939,7 @@ define @icmp_ugt_vi_nxv8i16_0( %va) { ; CHECK-LABEL: icmp_ugt_vi_nxv8i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 5 ; CHECK-NEXT: ret %head = insertelement poison, i16 5, i32 0 @@ -951,7 +951,7 @@ define @icmp_uge_vv_nxv8i16( %va, %vb) { ; CHECK-LABEL: icmp_uge_vv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v10, v8 ; CHECK-NEXT: ret %vc = icmp uge %va, %vb @@ -961,7 +961,7 @@ define @icmp_uge_vx_nxv8i16( %va, i16 %b) { ; CHECK-LABEL: icmp_uge_vx_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsleu.vv v0, v10, v8 ; 
CHECK-NEXT: ret @@ -974,7 +974,7 @@ define @icmp_uge_xv_nxv8i16( %va, i16 %b) { ; CHECK-LABEL: icmp_uge_xv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -986,7 +986,7 @@ define @icmp_uge_vi_nxv8i16_0( %va) { ; CHECK-LABEL: icmp_uge_vi_nxv8i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmv.v.i v10, -16 ; CHECK-NEXT: vmsleu.vv v0, v10, v8 ; CHECK-NEXT: ret @@ -999,7 +999,7 @@ define @icmp_uge_vi_nxv8i16_1( %va) { ; CHECK-LABEL: icmp_uge_vi_nxv8i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 14 ; CHECK-NEXT: ret %head = insertelement poison, i16 15, i32 0 @@ -1011,7 +1011,7 @@ define @icmp_uge_iv_nxv8i16_1( %va) { ; CHECK-LABEL: icmp_uge_iv_nxv8i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i16 15, i32 0 @@ -1023,7 +1023,7 @@ define @icmp_uge_vi_nxv8i16_2( %va) { ; CHECK-LABEL: icmp_uge_vi_nxv8i16_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: ret %head = insertelement poison, i16 0, i32 0 @@ -1035,7 +1035,7 @@ define @icmp_uge_vi_nxv8i16_3( %va) { ; CHECK-LABEL: icmp_uge_vi_nxv8i16_3: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 0 ; CHECK-NEXT: ret %head = insertelement poison, i16 1, i32 0 @@ -1047,7 +1047,7 @@ define @icmp_uge_vi_nxv8i16_4( %va) { ; CHECK-LABEL: icmp_uge_vi_nxv8i16_4: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement poison, i16 -15, i32 0 @@ -1059,7 +1059,7 @@ define @icmp_uge_vi_nxv8i16_5( %va) { ; CHECK-LABEL: icmp_uge_vi_nxv8i16_5: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i16 16, i32 0 @@ -1071,7 +1071,7 @@ define @icmp_ult_vv_nxv8i16( %va, %vb) { ; CHECK-LABEL: icmp_ult_vv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v10 ; CHECK-NEXT: ret %vc = icmp ult %va, %vb @@ -1081,7 +1081,7 @@ define @icmp_ult_vx_nxv8i16( %va, i16 %b) { ; CHECK-LABEL: icmp_ult_vx_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -1093,7 +1093,7 @@ define @icmp_ult_xv_nxv8i16( %va, i16 %b) { ; CHECK-LABEL: icmp_ult_xv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -1106,7 +1106,7 @@ ; CHECK-LABEL: icmp_ult_vi_nxv8i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -16 -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; 
CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 -16, i32 0 @@ -1118,7 +1118,7 @@ define @icmp_ult_vi_nxv8i16_1( %va) { ; CHECK-LABEL: icmp_ult_vi_nxv8i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement poison, i16 -15, i32 0 @@ -1130,7 +1130,7 @@ define @icmp_ult_iv_nxv8i16_1( %va) { ; CHECK-LABEL: icmp_ult_iv_nxv8i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, -15 ; CHECK-NEXT: ret %head = insertelement poison, i16 -15, i32 0 @@ -1142,7 +1142,7 @@ define @icmp_ult_vi_nxv8i16_2( %va) { ; CHECK-LABEL: icmp_ult_vi_nxv8i16_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmclr.m v0 ; CHECK-NEXT: ret %head = insertelement poison, i16 0, i32 0 @@ -1154,7 +1154,7 @@ define @icmp_ult_vi_nxv8i16_3( %va) { ; CHECK-LABEL: icmp_ult_vi_nxv8i16_3: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 0 ; CHECK-NEXT: ret %head = insertelement poison, i16 1, i32 0 @@ -1166,7 +1166,7 @@ define @icmp_ult_vi_nxv8i16_4( %va) { ; CHECK-LABEL: icmp_ult_vi_nxv8i16_4: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i16 16, i32 0 @@ -1178,7 +1178,7 @@ define @icmp_ule_vv_nxv8i16( %va, %vb) { ; CHECK-LABEL: icmp_ule_vv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v10 ; CHECK-NEXT: ret %vc = icmp ule %va, %vb @@ -1188,7 +1188,7 @@ define @icmp_ule_vx_nxv8i16( %va, i16 %b) { ; CHECK-LABEL: icmp_ule_vx_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -1200,7 +1200,7 @@ define @icmp_ule_xv_nxv8i16( %va, i16 %b) { ; CHECK-LABEL: icmp_ule_xv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsleu.vv v0, v10, v8 ; CHECK-NEXT: ret @@ -1213,7 +1213,7 @@ define @icmp_ule_vi_nxv8i16_0( %va) { ; CHECK-LABEL: icmp_ule_vi_nxv8i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 5 ; CHECK-NEXT: ret %head = insertelement poison, i16 5, i32 0 @@ -1225,7 +1225,7 @@ define @icmp_sgt_vv_nxv8i16( %va, %vb) { ; CHECK-LABEL: icmp_sgt_vv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmslt.vv v0, v10, v8 ; CHECK-NEXT: ret %vc = icmp sgt %va, %vb @@ -1235,7 +1235,7 @@ define @icmp_sgt_vx_nxv8i16( %va, i16 %b) { ; CHECK-LABEL: icmp_sgt_vx_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -1247,7 +1247,7 @@ define 
@icmp_sgt_xv_nxv8i16( %va, i16 %b) { ; CHECK-LABEL: icmp_sgt_xv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -1259,7 +1259,7 @@ define @icmp_sgt_vi_nxv8i16_0( %va) { ; CHECK-LABEL: icmp_sgt_vi_nxv8i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 5 ; CHECK-NEXT: ret %head = insertelement poison, i16 5, i32 0 @@ -1271,7 +1271,7 @@ define @icmp_sge_vv_nxv8i16( %va, %vb) { ; CHECK-LABEL: icmp_sge_vv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsle.vv v0, v10, v8 ; CHECK-NEXT: ret %vc = icmp sge %va, %vb @@ -1281,7 +1281,7 @@ define @icmp_sge_vx_nxv8i16( %va, i16 %b) { ; CHECK-LABEL: icmp_sge_vx_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsle.vv v0, v10, v8 ; CHECK-NEXT: ret @@ -1294,7 +1294,7 @@ define @icmp_sge_xv_nxv8i16( %va, i16 %b) { ; CHECK-LABEL: icmp_sge_xv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -1306,7 +1306,7 @@ define @icmp_sge_vi_nxv8i16_0( %va) { ; CHECK-LABEL: icmp_sge_vi_nxv8i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmv.v.i v10, -16 ; CHECK-NEXT: vmsle.vv v0, v10, v8 ; CHECK-NEXT: ret @@ -1319,7 +1319,7 @@ define @icmp_sge_vi_nxv8i16_1( %va) { ; CHECK-LABEL: icmp_sge_vi_nxv8i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement poison, i16 -15, i32 0 @@ -1331,7 +1331,7 @@ define @icmp_sge_iv_nxv8i16_1( %va) { ; CHECK-LABEL: icmp_sge_iv_nxv8i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, -15 ; CHECK-NEXT: ret %head = insertelement poison, i16 -15, i32 0 @@ -1343,7 +1343,7 @@ define @icmp_sge_vi_nxv8i16_2( %va) { ; CHECK-LABEL: icmp_sge_vi_nxv8i16_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, -1 ; CHECK-NEXT: ret %head = insertelement poison, i16 0, i32 0 @@ -1355,7 +1355,7 @@ define @icmp_sge_vi_nxv8i16_3( %va) { ; CHECK-LABEL: icmp_sge_vi_nxv8i16_3: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i16 16, i32 0 @@ -1367,7 +1367,7 @@ define @icmp_slt_vv_nxv8i16( %va, %vb) { ; CHECK-LABEL: icmp_slt_vv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v10 ; CHECK-NEXT: ret %vc = icmp slt %va, %vb @@ -1377,7 +1377,7 @@ define @icmp_slt_vx_nxv8i16( %va, i16 %b) { ; CHECK-LABEL: icmp_slt_vx_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; 
CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -1389,7 +1389,7 @@ define @icmp_slt_xv_nxv8i16( %va, i16 %b) { ; CHECK-LABEL: icmp_slt_xv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -1402,7 +1402,7 @@ ; CHECK-LABEL: icmp_slt_vi_nxv8i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -16 -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 -16, i32 0 @@ -1414,7 +1414,7 @@ define @icmp_slt_vi_nxv8i16_1( %va) { ; CHECK-LABEL: icmp_slt_vi_nxv8i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement poison, i16 -15, i32 0 @@ -1426,7 +1426,7 @@ define @icmp_slt_iv_nxv8i16_1( %va) { ; CHECK-LABEL: icmp_slt_iv_nxv8i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, -15 ; CHECK-NEXT: ret %head = insertelement poison, i16 -15, i32 0 @@ -1438,7 +1438,7 @@ define @icmp_slt_vi_nxv8i16_2( %va) { ; CHECK-LABEL: icmp_slt_vi_nxv8i16_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, zero ; CHECK-NEXT: ret %head = insertelement poison, i16 0, i32 0 @@ -1450,7 +1450,7 @@ define @icmp_slt_vi_nxv8i16_3( %va) { ; CHECK-LABEL: icmp_slt_vi_nxv8i16_3: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i16 16, i32 0 @@ -1462,7 +1462,7 @@ define @icmp_sle_vv_nxv8i16( %va, %vb) { ; CHECK-LABEL: icmp_sle_vv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v10 ; CHECK-NEXT: ret %vc = icmp sle %va, %vb @@ -1472,7 +1472,7 @@ define @icmp_sle_vx_nxv8i16( %va, i16 %b) { ; CHECK-LABEL: icmp_sle_vx_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -1484,7 +1484,7 @@ define @icmp_sle_xv_nxv8i16( %va, i16 %b) { ; CHECK-LABEL: icmp_sle_xv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vmsle.vv v0, v10, v8 ; CHECK-NEXT: ret @@ -1497,7 +1497,7 @@ define @icmp_sle_vi_nxv8i16_0( %va) { ; CHECK-LABEL: icmp_sle_vi_nxv8i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 5 ; CHECK-NEXT: ret %head = insertelement poison, i16 5, i32 0 @@ -1509,7 +1509,7 @@ define @icmp_eq_vv_nxv8i32( %va, %vb) { ; CHECK-LABEL: icmp_eq_vv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, v12 ; CHECK-NEXT: ret %vc = icmp eq %va, %vb @@ -1519,7 +1519,7 @@ define @icmp_eq_vx_nxv8i32( %va, i32 %b) { ; CHECK-LABEL: icmp_eq_vx_nxv8i32: 
; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -1531,7 +1531,7 @@ define @icmp_eq_xv_nxv8i32( %va, i32 %b) { ; CHECK-LABEL: icmp_eq_xv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -1543,7 +1543,7 @@ define @icmp_eq_vi_nxv8i32_0( %va) { ; CHECK-LABEL: icmp_eq_vi_nxv8i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 0 ; CHECK-NEXT: ret %head = insertelement poison, i32 0, i32 0 @@ -1555,7 +1555,7 @@ define @icmp_eq_vi_nxv8i32_1( %va) { ; CHECK-LABEL: icmp_eq_vi_nxv8i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 5 ; CHECK-NEXT: ret %head = insertelement poison, i32 5, i32 0 @@ -1567,7 +1567,7 @@ define @icmp_eq_iv_nxv8i32_1( %va) { ; CHECK-LABEL: icmp_eq_iv_nxv8i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 5 ; CHECK-NEXT: ret %head = insertelement poison, i32 5, i32 0 @@ -1579,7 +1579,7 @@ define @icmp_ne_vv_nxv8i32( %va, %vb) { ; CHECK-LABEL: icmp_ne_vv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v12 ; CHECK-NEXT: ret %vc = icmp ne %va, %vb @@ -1589,7 +1589,7 @@ define @icmp_ne_vx_nxv8i32( %va, i32 %b) { ; CHECK-LABEL: icmp_ne_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -1601,7 +1601,7 @@ define @icmp_ne_xv_nxv8i32( %va, i32 %b) { ; CHECK-LABEL: icmp_ne_xv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -1613,7 +1613,7 @@ define @icmp_ne_vi_nxv8i32_0( %va) { ; CHECK-LABEL: icmp_ne_vi_nxv8i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 5 ; CHECK-NEXT: ret %head = insertelement poison, i32 5, i32 0 @@ -1625,7 +1625,7 @@ define @icmp_ugt_vv_nxv8i32( %va, %vb) { ; CHECK-LABEL: icmp_ugt_vv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v12, v8 ; CHECK-NEXT: ret %vc = icmp ugt %va, %vb @@ -1635,7 +1635,7 @@ define @icmp_ugt_vx_nxv8i32( %va, i32 %b) { ; CHECK-LABEL: icmp_ugt_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -1647,7 +1647,7 @@ define @icmp_ugt_xv_nxv8i32( %va, i32 %b) { ; CHECK-LABEL: icmp_ugt_xv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret %head = 
insertelement poison, i32 %b, i32 0 @@ -1659,7 +1659,7 @@ define @icmp_ugt_vi_nxv8i32_0( %va) { ; CHECK-LABEL: icmp_ugt_vi_nxv8i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 5 ; CHECK-NEXT: ret %head = insertelement poison, i32 5, i32 0 @@ -1671,7 +1671,7 @@ define @icmp_uge_vv_nxv8i32( %va, %vb) { ; CHECK-LABEL: icmp_uge_vv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v12, v8 ; CHECK-NEXT: ret %vc = icmp uge %va, %vb @@ -1681,7 +1681,7 @@ define @icmp_uge_vx_nxv8i32( %va, i32 %b) { ; CHECK-LABEL: icmp_uge_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsleu.vv v0, v12, v8 ; CHECK-NEXT: ret @@ -1694,7 +1694,7 @@ define @icmp_uge_xv_nxv8i32( %va, i32 %b) { ; CHECK-LABEL: icmp_uge_xv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -1706,7 +1706,7 @@ define @icmp_uge_vi_nxv8i32_0( %va) { ; CHECK-LABEL: icmp_uge_vi_nxv8i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmv.v.i v12, -16 ; CHECK-NEXT: vmsleu.vv v0, v12, v8 ; CHECK-NEXT: ret @@ -1719,7 +1719,7 @@ define @icmp_uge_vi_nxv8i32_1( %va) { ; CHECK-LABEL: icmp_uge_vi_nxv8i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 14 ; CHECK-NEXT: ret %head = insertelement poison, i32 15, i32 0 @@ -1731,7 +1731,7 @@ define @icmp_uge_iv_nxv8i32_1( %va) { ; CHECK-LABEL: icmp_uge_iv_nxv8i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i32 15, i32 0 @@ -1743,7 +1743,7 @@ define @icmp_uge_vi_nxv8i32_2( %va) { ; CHECK-LABEL: icmp_uge_vi_nxv8i32_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: ret %head = insertelement poison, i32 0, i32 0 @@ -1755,7 +1755,7 @@ define @icmp_uge_vi_nxv8i32_3( %va) { ; CHECK-LABEL: icmp_uge_vi_nxv8i32_3: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 0 ; CHECK-NEXT: ret %head = insertelement poison, i32 1, i32 0 @@ -1767,7 +1767,7 @@ define @icmp_uge_vi_nxv8i32_4( %va) { ; CHECK-LABEL: icmp_uge_vi_nxv8i32_4: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement poison, i32 -15, i32 0 @@ -1779,7 +1779,7 @@ define @icmp_uge_vi_nxv8i32_5( %va) { ; CHECK-LABEL: icmp_uge_vi_nxv8i32_5: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i32 16, i32 0 @@ -1791,7 +1791,7 @@ define @icmp_ult_vv_nxv8i32( %va, %vb) { ; CHECK-LABEL: icmp_ult_vv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, 
zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v12 ; CHECK-NEXT: ret %vc = icmp ult %va, %vb @@ -1801,7 +1801,7 @@ define @icmp_ult_vx_nxv8i32( %va, i32 %b) { ; CHECK-LABEL: icmp_ult_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -1813,7 +1813,7 @@ define @icmp_ult_xv_nxv8i32( %va, i32 %b) { ; CHECK-LABEL: icmp_ult_xv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -1826,7 +1826,7 @@ ; CHECK-LABEL: icmp_ult_vi_nxv8i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -16 -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 -16, i32 0 @@ -1838,7 +1838,7 @@ define @icmp_ult_vi_nxv8i32_1( %va) { ; CHECK-LABEL: icmp_ult_vi_nxv8i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement poison, i32 -15, i32 0 @@ -1850,7 +1850,7 @@ define @icmp_ult_iv_nxv8i32_1( %va) { ; CHECK-LABEL: icmp_ult_iv_nxv8i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, -15 ; CHECK-NEXT: ret %head = insertelement poison, i32 -15, i32 0 @@ -1862,7 +1862,7 @@ define @icmp_ult_vi_nxv8i32_2( %va) { ; CHECK-LABEL: icmp_ult_vi_nxv8i32_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmclr.m v0 ; CHECK-NEXT: ret %head = insertelement poison, i32 0, i32 0 @@ -1874,7 +1874,7 @@ define @icmp_ult_vi_nxv8i32_3( %va) { ; CHECK-LABEL: icmp_ult_vi_nxv8i32_3: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 0 ; CHECK-NEXT: ret %head = insertelement poison, i32 1, i32 0 @@ -1886,7 +1886,7 @@ define @icmp_ult_vi_nxv8i32_4( %va) { ; CHECK-LABEL: icmp_ult_vi_nxv8i32_4: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i32 16, i32 0 @@ -1898,7 +1898,7 @@ define @icmp_ule_vv_nxv8i32( %va, %vb) { ; CHECK-LABEL: icmp_ule_vv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v12 ; CHECK-NEXT: ret %vc = icmp ule %va, %vb @@ -1908,7 +1908,7 @@ define @icmp_ule_vx_nxv8i32( %va, i32 %b) { ; CHECK-LABEL: icmp_ule_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -1920,7 +1920,7 @@ define @icmp_ule_xv_nxv8i32( %va, i32 %b) { ; CHECK-LABEL: icmp_ule_xv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsleu.vv v0, v12, v8 ; CHECK-NEXT: ret @@ -1933,7 +1933,7 @@ 
define @icmp_ule_vi_nxv8i32_0( %va) { ; CHECK-LABEL: icmp_ule_vi_nxv8i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 5 ; CHECK-NEXT: ret %head = insertelement poison, i32 5, i32 0 @@ -1945,7 +1945,7 @@ define @icmp_sgt_vv_nxv8i32( %va, %vb) { ; CHECK-LABEL: icmp_sgt_vv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmslt.vv v0, v12, v8 ; CHECK-NEXT: ret %vc = icmp sgt %va, %vb @@ -1955,7 +1955,7 @@ define @icmp_sgt_vx_nxv8i32( %va, i32 %b) { ; CHECK-LABEL: icmp_sgt_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -1967,7 +1967,7 @@ define @icmp_sgt_xv_nxv8i32( %va, i32 %b) { ; CHECK-LABEL: icmp_sgt_xv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -1979,7 +1979,7 @@ define @icmp_sgt_vi_nxv8i32_0( %va) { ; CHECK-LABEL: icmp_sgt_vi_nxv8i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 5 ; CHECK-NEXT: ret %head = insertelement poison, i32 5, i32 0 @@ -1991,7 +1991,7 @@ define @icmp_sge_vv_nxv8i32( %va, %vb) { ; CHECK-LABEL: icmp_sge_vv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsle.vv v0, v12, v8 ; CHECK-NEXT: ret %vc = icmp sge %va, %vb @@ -2001,7 +2001,7 @@ define @icmp_sge_vx_nxv8i32( %va, i32 %b) { ; CHECK-LABEL: icmp_sge_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsle.vv v0, v12, v8 ; CHECK-NEXT: ret @@ -2014,7 +2014,7 @@ define @icmp_sge_xv_nxv8i32( %va, i32 %b) { ; CHECK-LABEL: icmp_sge_xv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -2026,7 +2026,7 @@ define @icmp_sge_vi_nxv8i32_0( %va) { ; CHECK-LABEL: icmp_sge_vi_nxv8i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmv.v.i v12, -16 ; CHECK-NEXT: vmsle.vv v0, v12, v8 ; CHECK-NEXT: ret @@ -2039,7 +2039,7 @@ define @icmp_sge_vi_nxv8i32_1( %va) { ; CHECK-LABEL: icmp_sge_vi_nxv8i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement poison, i32 -15, i32 0 @@ -2051,7 +2051,7 @@ define @icmp_sge_iv_nxv8i32_1( %va) { ; CHECK-LABEL: icmp_sge_iv_nxv8i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, -15 ; CHECK-NEXT: ret %head = insertelement poison, i32 -15, i32 0 @@ -2063,7 +2063,7 @@ define @icmp_sge_vi_nxv8i32_2( %va) { ; CHECK-LABEL: icmp_sge_vi_nxv8i32_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, 
ma ; CHECK-NEXT: vmsgt.vi v0, v8, -1 ; CHECK-NEXT: ret %head = insertelement poison, i32 0, i32 0 @@ -2075,7 +2075,7 @@ define @icmp_sge_vi_nxv8i32_3( %va) { ; CHECK-LABEL: icmp_sge_vi_nxv8i32_3: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i32 16, i32 0 @@ -2087,7 +2087,7 @@ define @icmp_slt_vv_nxv8i32( %va, %vb) { ; CHECK-LABEL: icmp_slt_vv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v12 ; CHECK-NEXT: ret %vc = icmp slt %va, %vb @@ -2097,7 +2097,7 @@ define @icmp_slt_vx_nxv8i32( %va, i32 %b) { ; CHECK-LABEL: icmp_slt_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -2109,7 +2109,7 @@ define @icmp_slt_xv_nxv8i32( %va, i32 %b) { ; CHECK-LABEL: icmp_slt_xv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -2122,7 +2122,7 @@ ; CHECK-LABEL: icmp_slt_vi_nxv8i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -16 -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 -16, i32 0 @@ -2134,7 +2134,7 @@ define @icmp_slt_vi_nxv8i32_1( %va) { ; CHECK-LABEL: icmp_slt_vi_nxv8i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement poison, i32 -15, i32 0 @@ -2146,7 +2146,7 @@ define @icmp_slt_iv_nxv8i32_1( %va) { ; CHECK-LABEL: icmp_slt_iv_nxv8i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, -15 ; CHECK-NEXT: ret %head = insertelement poison, i32 -15, i32 0 @@ -2158,7 +2158,7 @@ define @icmp_slt_vi_nxv8i32_2( %va) { ; CHECK-LABEL: icmp_slt_vi_nxv8i32_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, zero ; CHECK-NEXT: ret %head = insertelement poison, i32 0, i32 0 @@ -2170,7 +2170,7 @@ define @icmp_slt_vi_nxv8i32_3( %va) { ; CHECK-LABEL: icmp_slt_vi_nxv8i32_3: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i32 16, i32 0 @@ -2182,7 +2182,7 @@ define @icmp_sle_vv_nxv8i32( %va, %vb) { ; CHECK-LABEL: icmp_sle_vv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v12 ; CHECK-NEXT: ret %vc = icmp sle %va, %vb @@ -2192,7 +2192,7 @@ define @icmp_sle_vx_nxv8i32( %va, i32 %b) { ; CHECK-LABEL: icmp_sle_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -2204,7 +2204,7 @@ define @icmp_sle_xv_nxv8i32( %va, i32 %b) { ; 
CHECK-LABEL: icmp_sle_xv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vmsle.vv v0, v12, v8 ; CHECK-NEXT: ret @@ -2217,7 +2217,7 @@ define @icmp_sle_vi_nxv8i32_0( %va) { ; CHECK-LABEL: icmp_sle_vi_nxv8i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 5 ; CHECK-NEXT: ret %head = insertelement poison, i32 5, i32 0 @@ -2229,7 +2229,7 @@ define @icmp_eq_vv_nxv8i64( %va, %vb) { ; CHECK-LABEL: icmp_eq_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, v16 ; CHECK-NEXT: ret %vc = icmp eq %va, %vb @@ -2244,7 +2244,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmseq.vv v0, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -2252,7 +2252,7 @@ ; ; RV64-LABEL: icmp_eq_vx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vmseq.vx v0, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -2269,7 +2269,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmseq.vv v0, v16, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -2277,7 +2277,7 @@ ; ; RV64-LABEL: icmp_eq_xv_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vmseq.vx v0, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -2289,7 +2289,7 @@ define @icmp_eq_vi_nxv8i64_0( %va) { ; CHECK-LABEL: icmp_eq_vi_nxv8i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 0 ; CHECK-NEXT: ret %head = insertelement poison, i64 0, i32 0 @@ -2301,7 +2301,7 @@ define @icmp_eq_vi_nxv8i64_1( %va) { ; CHECK-LABEL: icmp_eq_vi_nxv8i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 5 ; CHECK-NEXT: ret %head = insertelement poison, i64 5, i32 0 @@ -2313,7 +2313,7 @@ define @icmp_eq_iv_nxv8i64_1( %va) { ; CHECK-LABEL: icmp_eq_iv_nxv8i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 5 ; CHECK-NEXT: ret %head = insertelement poison, i64 5, i32 0 @@ -2325,7 +2325,7 @@ define @icmp_ne_vv_nxv8i64( %va, %vb) { ; CHECK-LABEL: icmp_ne_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v16 ; CHECK-NEXT: ret %vc = icmp ne %va, %vb @@ -2340,7 +2340,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmsne.vv v0, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -2348,7 +2348,7 @@ ; ; RV64-LABEL: icmp_ne_vx_nxv8i64: ; RV64: # %bb.0: -; 
RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vmsne.vx v0, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -2365,7 +2365,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmsne.vv v0, v16, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -2373,7 +2373,7 @@ ; ; RV64-LABEL: icmp_ne_xv_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vmsne.vx v0, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -2385,7 +2385,7 @@ define @icmp_ne_vi_nxv8i64_0( %va) { ; CHECK-LABEL: icmp_ne_vi_nxv8i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 5 ; CHECK-NEXT: ret %head = insertelement poison, i64 5, i32 0 @@ -2397,7 +2397,7 @@ define @icmp_ugt_vv_nxv8i64( %va, %vb) { ; CHECK-LABEL: icmp_ugt_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v16, v8 ; CHECK-NEXT: ret %vc = icmp ugt %va, %vb @@ -2412,7 +2412,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmsltu.vv v0, v16, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -2420,7 +2420,7 @@ ; ; RV64-LABEL: icmp_ugt_vx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vmsgtu.vx v0, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -2437,7 +2437,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmsltu.vv v0, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -2445,7 +2445,7 @@ ; ; RV64-LABEL: icmp_ugt_xv_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vmsltu.vx v0, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -2457,7 +2457,7 @@ define @icmp_ugt_vi_nxv8i64_0( %va) { ; CHECK-LABEL: icmp_ugt_vi_nxv8i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 5 ; CHECK-NEXT: ret %head = insertelement poison, i64 5, i32 0 @@ -2469,7 +2469,7 @@ define @icmp_uge_vv_nxv8i64( %va, %vb) { ; CHECK-LABEL: icmp_uge_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v16, v8 ; CHECK-NEXT: ret %vc = icmp uge %va, %vb @@ -2484,7 +2484,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmsleu.vv v0, v16, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -2492,7 +2492,7 @@ ; ; RV64-LABEL: icmp_uge_vx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, 
ta, ma ; RV64-NEXT: vmv.v.x v16, a0 ; RV64-NEXT: vmsleu.vv v0, v16, v8 ; RV64-NEXT: ret @@ -2510,7 +2510,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmsleu.vv v0, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -2518,7 +2518,7 @@ ; ; RV64-LABEL: icmp_uge_xv_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vmsleu.vx v0, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -2530,7 +2530,7 @@ define @icmp_uge_vi_nxv8i64_0( %va) { ; CHECK-LABEL: icmp_uge_vi_nxv8i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmv.v.i v16, -16 ; CHECK-NEXT: vmsleu.vv v0, v16, v8 ; CHECK-NEXT: ret @@ -2543,7 +2543,7 @@ define @icmp_uge_vi_nxv8i64_1( %va) { ; CHECK-LABEL: icmp_uge_vi_nxv8i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 14 ; CHECK-NEXT: ret %head = insertelement poison, i64 15, i32 0 @@ -2555,7 +2555,7 @@ define @icmp_uge_iv_nxv8i64_1( %va) { ; CHECK-LABEL: icmp_uge_iv_nxv8i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i64 15, i32 0 @@ -2567,7 +2567,7 @@ define @icmp_uge_vi_nxv8i64_2( %va) { ; CHECK-LABEL: icmp_uge_vi_nxv8i64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: ret %head = insertelement poison, i64 0, i32 0 @@ -2579,7 +2579,7 @@ define @icmp_uge_vi_nxv8i64_3( %va) { ; CHECK-LABEL: icmp_uge_vi_nxv8i64_3: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 0 ; CHECK-NEXT: ret %head = insertelement poison, i64 1, i32 0 @@ -2591,7 +2591,7 @@ define @icmp_uge_vi_nxv8i64_4( %va) { ; CHECK-LABEL: icmp_uge_vi_nxv8i64_4: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement poison, i64 -15, i32 0 @@ -2603,7 +2603,7 @@ define @icmp_uge_vi_nxv8i64_5( %va) { ; CHECK-LABEL: icmp_uge_vi_nxv8i64_5: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i64 16, i32 0 @@ -2615,7 +2615,7 @@ define @icmp_ult_vv_nxv8i64( %va, %vb) { ; CHECK-LABEL: icmp_ult_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v16 ; CHECK-NEXT: ret %vc = icmp ult %va, %vb @@ -2630,7 +2630,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmsltu.vv v0, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -2638,7 +2638,7 @@ ; ; RV64-LABEL: icmp_ult_vx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, 
zero, e64, m8, ta, ma ; RV64-NEXT: vmsltu.vx v0, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -2655,7 +2655,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmsltu.vv v0, v16, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -2663,7 +2663,7 @@ ; ; RV64-LABEL: icmp_ult_xv_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vmsgtu.vx v0, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -2676,7 +2676,7 @@ ; CHECK-LABEL: icmp_ult_vi_nxv8i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -16 -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 -16, i32 0 @@ -2688,7 +2688,7 @@ define @icmp_ult_vi_nxv8i64_1( %va) { ; CHECK-LABEL: icmp_ult_vi_nxv8i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement poison, i64 -15, i32 0 @@ -2700,7 +2700,7 @@ define @icmp_ult_iv_nxv8i64_1( %va) { ; CHECK-LABEL: icmp_ult_iv_nxv8i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, -15 ; CHECK-NEXT: ret %head = insertelement poison, i64 -15, i32 0 @@ -2712,7 +2712,7 @@ define @icmp_ult_vi_nxv8i64_2( %va) { ; CHECK-LABEL: icmp_ult_vi_nxv8i64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmclr.m v0 ; CHECK-NEXT: ret %head = insertelement poison, i64 0, i32 0 @@ -2724,7 +2724,7 @@ define @icmp_ult_vi_nxv8i64_3( %va) { ; CHECK-LABEL: icmp_ult_vi_nxv8i64_3: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 0 ; CHECK-NEXT: ret %head = insertelement poison, i64 1, i32 0 @@ -2736,7 +2736,7 @@ define @icmp_ult_vi_nxv8i64_4( %va) { ; CHECK-LABEL: icmp_ult_vi_nxv8i64_4: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i64 16, i32 0 @@ -2748,7 +2748,7 @@ define @icmp_ule_vv_nxv8i64( %va, %vb) { ; CHECK-LABEL: icmp_ule_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v16 ; CHECK-NEXT: ret %vc = icmp ule %va, %vb @@ -2763,7 +2763,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmsleu.vv v0, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -2771,7 +2771,7 @@ ; ; RV64-LABEL: icmp_ule_vx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vmsleu.vx v0, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -2788,7 +2788,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: 
vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmsleu.vv v0, v16, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -2796,7 +2796,7 @@ ; ; RV64-LABEL: icmp_ule_xv_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vmv.v.x v16, a0 ; RV64-NEXT: vmsleu.vv v0, v16, v8 ; RV64-NEXT: ret @@ -2809,7 +2809,7 @@ define @icmp_ule_vi_nxv8i64_0( %va) { ; CHECK-LABEL: icmp_ule_vi_nxv8i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 5 ; CHECK-NEXT: ret %head = insertelement poison, i64 5, i32 0 @@ -2821,7 +2821,7 @@ define @icmp_sgt_vv_nxv8i64( %va, %vb) { ; CHECK-LABEL: icmp_sgt_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmslt.vv v0, v16, v8 ; CHECK-NEXT: ret %vc = icmp sgt %va, %vb @@ -2836,7 +2836,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmslt.vv v0, v16, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -2844,7 +2844,7 @@ ; ; RV64-LABEL: icmp_sgt_vx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vmsgt.vx v0, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -2861,7 +2861,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmslt.vv v0, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -2869,7 +2869,7 @@ ; ; RV64-LABEL: icmp_sgt_xv_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vmslt.vx v0, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -2881,7 +2881,7 @@ define @icmp_sgt_vi_nxv8i64_0( %va) { ; CHECK-LABEL: icmp_sgt_vi_nxv8i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 5 ; CHECK-NEXT: ret %head = insertelement poison, i64 5, i32 0 @@ -2893,7 +2893,7 @@ define @icmp_sge_vv_nxv8i64( %va, %vb) { ; CHECK-LABEL: icmp_sge_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmsle.vv v0, v16, v8 ; CHECK-NEXT: ret %vc = icmp sge %va, %vb @@ -2908,7 +2908,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmsle.vv v0, v16, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -2916,7 +2916,7 @@ ; ; RV64-LABEL: icmp_sge_vx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vmv.v.x v16, a0 ; RV64-NEXT: vmsle.vv v0, v16, v8 ; RV64-NEXT: ret @@ -2934,7 +2934,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmsle.vv v0, v8, v16 ; 
RV32-NEXT: addi sp, sp, 16 @@ -2942,7 +2942,7 @@ ; ; RV64-LABEL: icmp_sge_xv_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vmsle.vx v0, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -2954,7 +2954,7 @@ define @icmp_sge_vi_nxv8i64_0( %va) { ; CHECK-LABEL: icmp_sge_vi_nxv8i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmv.v.i v16, -16 ; CHECK-NEXT: vmsle.vv v0, v16, v8 ; CHECK-NEXT: ret @@ -2967,7 +2967,7 @@ define @icmp_sge_vi_nxv8i64_1( %va) { ; CHECK-LABEL: icmp_sge_vi_nxv8i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement poison, i64 -15, i32 0 @@ -2979,7 +2979,7 @@ define @icmp_sge_iv_nxv8i64_1( %va) { ; CHECK-LABEL: icmp_sge_iv_nxv8i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, -15 ; CHECK-NEXT: ret %head = insertelement poison, i64 -15, i32 0 @@ -2991,7 +2991,7 @@ define @icmp_sge_vi_nxv8i64_2( %va) { ; CHECK-LABEL: icmp_sge_vi_nxv8i64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, -1 ; CHECK-NEXT: ret %head = insertelement poison, i64 0, i32 0 @@ -3003,7 +3003,7 @@ define @icmp_sge_vi_nxv8i64_3( %va) { ; CHECK-LABEL: icmp_sge_vi_nxv8i64_3: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i64 16, i32 0 @@ -3015,7 +3015,7 @@ define @icmp_slt_vv_nxv8i64( %va, %vb) { ; CHECK-LABEL: icmp_slt_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v16 ; CHECK-NEXT: ret %vc = icmp slt %va, %vb @@ -3030,7 +3030,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmslt.vv v0, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -3038,7 +3038,7 @@ ; ; RV64-LABEL: icmp_slt_vx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vmslt.vx v0, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -3055,7 +3055,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmslt.vv v0, v16, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -3063,7 +3063,7 @@ ; ; RV64-LABEL: icmp_slt_xv_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vmsgt.vx v0, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -3076,7 +3076,7 @@ ; CHECK-LABEL: icmp_slt_vi_nxv8i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -16 -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 -16, i32 
0 @@ -3088,7 +3088,7 @@ define @icmp_slt_vi_nxv8i64_1( %va) { ; CHECK-LABEL: icmp_slt_vi_nxv8i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement poison, i64 -15, i32 0 @@ -3100,7 +3100,7 @@ define @icmp_slt_iv_nxv8i64_1( %va) { ; CHECK-LABEL: icmp_slt_iv_nxv8i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, -15 ; CHECK-NEXT: ret %head = insertelement poison, i64 -15, i32 0 @@ -3112,7 +3112,7 @@ define @icmp_slt_vi_nxv8i64_2( %va) { ; CHECK-LABEL: icmp_slt_vi_nxv8i64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, zero ; CHECK-NEXT: ret %head = insertelement poison, i64 0, i32 0 @@ -3124,7 +3124,7 @@ define @icmp_slt_vi_nxv8i64_3( %va) { ; CHECK-LABEL: icmp_slt_vi_nxv8i64_3: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i64 16, i32 0 @@ -3136,7 +3136,7 @@ define @icmp_sle_vv_nxv8i64( %va, %vb) { ; CHECK-LABEL: icmp_sle_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v16 ; CHECK-NEXT: ret %vc = icmp sle %va, %vb @@ -3151,7 +3151,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmsle.vv v0, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -3159,7 +3159,7 @@ ; ; RV64-LABEL: icmp_sle_vx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vmsle.vx v0, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -3176,7 +3176,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmsle.vv v0, v16, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -3184,7 +3184,7 @@ ; ; RV64-LABEL: icmp_sle_xv_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vmv.v.x v16, a0 ; RV64-NEXT: vmsle.vv v0, v16, v8 ; RV64-NEXT: ret @@ -3197,7 +3197,7 @@ define @icmp_sle_vi_nxv8i64_0( %va) { ; CHECK-LABEL: icmp_sle_vi_nxv8i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 5 ; CHECK-NEXT: ret %head = insertelement poison, i64 5, i32 0 @@ -3213,7 +3213,7 @@ define @icmp_eq_ii_nxv8i8() { ; CHECK-LABEL: icmp_eq_ii_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmclr.m v0 ; CHECK-NEXT: ret %heada = insertelement poison, i8 5, i32 0 @@ -3232,10 +3232,10 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: add a1, a0, a0 -; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; CHECK-NEXT: vmseq.vi v24, v16, 0 ; CHECK-NEXT: vmseq.vi v0, v8, 0 -; CHECK-NEXT: vsetvli 
zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma ; CHECK-NEXT: vslideup.vx v0, v24, a0 ; CHECK-NEXT: ret %vc = icmp eq %va, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll --- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll @@ -6,7 +6,7 @@ ; CHECK-LABEL: sink_splat_mul: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB0_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) @@ -42,7 +42,7 @@ ; CHECK-LABEL: sink_splat_add: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB1_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) @@ -78,7 +78,7 @@ ; CHECK-LABEL: sink_splat_sub: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB2_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) @@ -114,7 +114,7 @@ ; CHECK-LABEL: sink_splat_rsub: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB3_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) @@ -150,7 +150,7 @@ ; CHECK-LABEL: sink_splat_and: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB4_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) @@ -186,7 +186,7 @@ ; CHECK-LABEL: sink_splat_or: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB5_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) @@ -222,7 +222,7 @@ ; CHECK-LABEL: sink_splat_xor: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB6_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) @@ -269,7 +269,7 @@ ; CHECK-NEXT: andi a4, a2, 1024 ; CHECK-NEXT: xori a2, a4, 1024 ; CHECK-NEXT: slli a5, a5, 1 -; CHECK-NEXT: vsetvli a6, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a6, zero, e32, m2, ta, ma ; CHECK-NEXT: mv a6, a0 ; CHECK-NEXT: mv a7, a2 ; CHECK-NEXT: .LBB7_3: # %vector.body @@ -362,7 +362,7 @@ ; CHECK-NEXT: andi a4, a2, 1024 ; CHECK-NEXT: xori a2, a4, 1024 ; CHECK-NEXT: slli a5, a5, 1 -; CHECK-NEXT: vsetvli a6, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a6, zero, e32, m2, ta, ma ; CHECK-NEXT: mv a6, a0 ; CHECK-NEXT: mv a7, a2 ; CHECK-NEXT: .LBB8_3: # %vector.body @@ -455,7 +455,7 @@ ; CHECK-NEXT: andi a4, a2, 1024 ; CHECK-NEXT: xori a2, a4, 1024 ; CHECK-NEXT: slli a5, a5, 1 -; CHECK-NEXT: vsetvli a6, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a6, zero, e32, m2, ta, ma ; CHECK-NEXT: mv a6, a0 ; CHECK-NEXT: mv a7, a2 ; CHECK-NEXT: .LBB9_3: # %vector.body @@ -548,7 +548,7 @@ ; 
CHECK-NEXT: andi a4, a2, 1024 ; CHECK-NEXT: xori a2, a4, 1024 ; CHECK-NEXT: slli a5, a5, 1 -; CHECK-NEXT: vsetvli a6, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a6, zero, e32, m2, ta, ma ; CHECK-NEXT: mv a6, a0 ; CHECK-NEXT: mv a7, a2 ; CHECK-NEXT: .LBB10_3: # %vector.body @@ -641,7 +641,7 @@ ; CHECK-NEXT: andi a4, a2, 1024 ; CHECK-NEXT: xori a2, a4, 1024 ; CHECK-NEXT: slli a5, a5, 1 -; CHECK-NEXT: vsetvli a6, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a6, zero, e32, m2, ta, ma ; CHECK-NEXT: mv a6, a0 ; CHECK-NEXT: mv a7, a2 ; CHECK-NEXT: .LBB11_3: # %vector.body @@ -734,7 +734,7 @@ ; CHECK-NEXT: andi a4, a2, 1024 ; CHECK-NEXT: xori a2, a4, 1024 ; CHECK-NEXT: slli a5, a5, 1 -; CHECK-NEXT: vsetvli a6, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a6, zero, e32, m2, ta, ma ; CHECK-NEXT: mv a6, a0 ; CHECK-NEXT: mv a7, a2 ; CHECK-NEXT: .LBB12_3: # %vector.body @@ -827,7 +827,7 @@ ; CHECK-NEXT: andi a4, a2, 1024 ; CHECK-NEXT: xori a2, a4, 1024 ; CHECK-NEXT: slli a5, a5, 1 -; CHECK-NEXT: vsetvli a6, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a6, zero, e32, m2, ta, ma ; CHECK-NEXT: mv a6, a0 ; CHECK-NEXT: mv a7, a2 ; CHECK-NEXT: .LBB13_3: # %vector.body @@ -909,7 +909,7 @@ ; CHECK-LABEL: sink_splat_shl: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB14_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) @@ -945,7 +945,7 @@ ; CHECK-LABEL: sink_splat_lshr: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB15_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) @@ -981,7 +981,7 @@ ; CHECK-LABEL: sink_splat_ashr: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB16_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) @@ -1028,7 +1028,7 @@ ; CHECK-NEXT: andi a4, a2, 1024 ; CHECK-NEXT: xori a2, a4, 1024 ; CHECK-NEXT: slli a5, a5, 1 -; CHECK-NEXT: vsetvli a6, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a6, zero, e32, m2, ta, ma ; CHECK-NEXT: mv a6, a0 ; CHECK-NEXT: mv a7, a2 ; CHECK-NEXT: .LBB17_3: # %vector.body @@ -1121,7 +1121,7 @@ ; CHECK-NEXT: andi a4, a2, 1024 ; CHECK-NEXT: xori a2, a4, 1024 ; CHECK-NEXT: slli a5, a5, 1 -; CHECK-NEXT: vsetvli a6, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a6, zero, e32, m2, ta, ma ; CHECK-NEXT: mv a6, a0 ; CHECK-NEXT: mv a7, a2 ; CHECK-NEXT: .LBB18_3: # %vector.body @@ -1214,7 +1214,7 @@ ; CHECK-NEXT: andi a3, a1, 1024 ; CHECK-NEXT: xori a1, a3, 1024 ; CHECK-NEXT: slli a4, a4, 1 -; CHECK-NEXT: vsetvli a5, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a5, zero, e32, m2, ta, ma ; CHECK-NEXT: mv a5, a0 ; CHECK-NEXT: mv a6, a1 ; CHECK-NEXT: .LBB19_3: # %vector.body @@ -1296,7 +1296,7 @@ ; CHECK-LABEL: sink_splat_fmul: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a1, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB20_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) @@ -1332,7 +1332,7 @@ ; CHECK-LABEL: sink_splat_fdiv: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a1, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, 
e32, m1, ta, ma ; CHECK-NEXT: .LBB21_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) @@ -1368,7 +1368,7 @@ ; CHECK-LABEL: sink_splat_frdiv: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a1, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB22_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) @@ -1404,7 +1404,7 @@ ; CHECK-LABEL: sink_splat_fadd: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a1, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB23_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) @@ -1440,7 +1440,7 @@ ; CHECK-LABEL: sink_splat_fsub: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a1, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB24_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) @@ -1476,7 +1476,7 @@ ; CHECK-LABEL: sink_splat_frsub: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a1, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB25_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) @@ -1522,7 +1522,7 @@ ; CHECK-NEXT: addiw a1, a3, -1 ; CHECK-NEXT: andi a4, a1, 1024 ; CHECK-NEXT: xori a1, a4, 1024 -; CHECK-NEXT: vsetvli a5, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a5, zero, e32, m1, ta, ma ; CHECK-NEXT: mv a5, a0 ; CHECK-NEXT: mv a6, a1 ; CHECK-NEXT: .LBB26_3: # %vector.body @@ -1614,7 +1614,7 @@ ; CHECK-NEXT: addiw a1, a3, -1 ; CHECK-NEXT: andi a4, a1, 1024 ; CHECK-NEXT: xori a1, a4, 1024 -; CHECK-NEXT: vsetvli a5, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a5, zero, e32, m1, ta, ma ; CHECK-NEXT: mv a5, a0 ; CHECK-NEXT: mv a6, a1 ; CHECK-NEXT: .LBB27_3: # %vector.body @@ -1706,7 +1706,7 @@ ; CHECK-NEXT: addiw a1, a3, -1 ; CHECK-NEXT: andi a4, a1, 1024 ; CHECK-NEXT: xori a1, a4, 1024 -; CHECK-NEXT: vsetvli a5, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a5, zero, e32, m1, ta, ma ; CHECK-NEXT: mv a5, a0 ; CHECK-NEXT: mv a6, a1 ; CHECK-NEXT: .LBB28_3: # %vector.body @@ -1798,7 +1798,7 @@ ; CHECK-NEXT: addiw a1, a3, -1 ; CHECK-NEXT: andi a4, a1, 1024 ; CHECK-NEXT: xori a1, a4, 1024 -; CHECK-NEXT: vsetvli a5, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a5, zero, e32, m1, ta, ma ; CHECK-NEXT: mv a5, a0 ; CHECK-NEXT: mv a6, a1 ; CHECK-NEXT: .LBB29_3: # %vector.body @@ -1890,7 +1890,7 @@ ; CHECK-NEXT: addiw a1, a3, -1 ; CHECK-NEXT: andi a4, a1, 1024 ; CHECK-NEXT: xori a1, a4, 1024 -; CHECK-NEXT: vsetvli a5, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a5, zero, e32, m1, ta, ma ; CHECK-NEXT: mv a5, a0 ; CHECK-NEXT: mv a6, a1 ; CHECK-NEXT: .LBB30_3: # %vector.body @@ -1982,7 +1982,7 @@ ; CHECK-NEXT: addiw a1, a3, -1 ; CHECK-NEXT: andi a4, a1, 1024 ; CHECK-NEXT: xori a1, a4, 1024 -; CHECK-NEXT: vsetvli a5, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a5, zero, e32, m1, ta, ma ; CHECK-NEXT: mv a5, a0 ; CHECK-NEXT: mv a6, a1 ; CHECK-NEXT: .LBB31_3: # %vector.body @@ -2064,7 +2064,7 @@ ; CHECK-LABEL: sink_splat_fma: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB32_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; 
CHECK-NEXT: vle32.v v8, (a0) @@ -2105,7 +2105,7 @@ ; CHECK-LABEL: sink_splat_fma_commute: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB33_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) @@ -2157,7 +2157,7 @@ ; CHECK-NEXT: addiw a4, a3, -1 ; CHECK-NEXT: andi a5, a4, 1024 ; CHECK-NEXT: xori a4, a5, 1024 -; CHECK-NEXT: vsetvli a7, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a7, zero, e32, m1, ta, ma ; CHECK-NEXT: mv a7, a4 ; CHECK-NEXT: .LBB34_3: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 @@ -2260,7 +2260,7 @@ ; CHECK-NEXT: addiw a4, a3, -1 ; CHECK-NEXT: andi a5, a4, 1024 ; CHECK-NEXT: xori a4, a5, 1024 -; CHECK-NEXT: vsetvli a7, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a7, zero, e32, m1, ta, ma ; CHECK-NEXT: mv a7, a4 ; CHECK-NEXT: .LBB35_3: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 @@ -2357,7 +2357,7 @@ ; CHECK-LABEL: sink_splat_icmp: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: .LBB36_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 @@ -2395,7 +2395,7 @@ ; CHECK-LABEL: sink_splat_fcmp: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a1, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: .LBB37_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 @@ -2433,7 +2433,7 @@ ; CHECK-LABEL: sink_splat_udiv: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB38_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) @@ -2469,7 +2469,7 @@ ; CHECK-LABEL: sink_splat_sdiv: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB39_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) @@ -2505,7 +2505,7 @@ ; CHECK-LABEL: sink_splat_urem: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB40_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) @@ -2541,7 +2541,7 @@ ; CHECK-LABEL: sink_splat_srem: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB41_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) @@ -2588,7 +2588,7 @@ ; CHECK-NEXT: andi a4, a2, 1024 ; CHECK-NEXT: xori a2, a4, 1024 ; CHECK-NEXT: slli a5, a5, 1 -; CHECK-NEXT: vsetvli a6, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a6, zero, e32, m2, ta, ma ; CHECK-NEXT: mv a6, a0 ; CHECK-NEXT: mv a7, a2 ; CHECK-NEXT: .LBB42_3: # %vector.body @@ -2681,7 +2681,7 @@ ; CHECK-NEXT: andi a4, a2, 1024 ; CHECK-NEXT: xori a2, a4, 1024 ; CHECK-NEXT: slli a5, a5, 1 -; CHECK-NEXT: vsetvli a6, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a6, zero, e32, m2, ta, ma ; CHECK-NEXT: mv a6, a0 ; CHECK-NEXT: mv a7, a2 ; CHECK-NEXT: .LBB43_3: # 
%vector.body @@ -2774,7 +2774,7 @@ ; CHECK-NEXT: andi a4, a2, 1024 ; CHECK-NEXT: xori a2, a4, 1024 ; CHECK-NEXT: slli a5, a5, 1 -; CHECK-NEXT: vsetvli a6, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a6, zero, e32, m2, ta, ma ; CHECK-NEXT: mv a6, a0 ; CHECK-NEXT: mv a7, a2 ; CHECK-NEXT: .LBB44_3: # %vector.body @@ -2867,7 +2867,7 @@ ; CHECK-NEXT: andi a4, a2, 1024 ; CHECK-NEXT: xori a2, a4, 1024 ; CHECK-NEXT: slli a5, a5, 1 -; CHECK-NEXT: vsetvli a6, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a6, zero, e32, m2, ta, ma ; CHECK-NEXT: mv a6, a0 ; CHECK-NEXT: mv a7, a2 ; CHECK-NEXT: .LBB45_3: # %vector.body @@ -2951,13 +2951,13 @@ ; CHECK-LABEL: sink_splat_vp_mul: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a3, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB46_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vmul.vx v8, v8, a1, v0.t -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: addi a3, a3, -4 ; CHECK-NEXT: addi a0, a0, 16 @@ -2991,13 +2991,13 @@ ; CHECK-LABEL: sink_splat_vp_add: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a3, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB47_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vadd.vx v8, v8, a1, v0.t -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: addi a3, a3, -4 ; CHECK-NEXT: addi a0, a0, 16 @@ -3031,7 +3031,7 @@ define void @sink_splat_vp_add_commute(i32* nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_add_commute: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.x v8, a1 ; CHECK-NEXT: li a1, 1024 ; CHECK-NEXT: .LBB48_1: # %vector.body @@ -3039,7 +3039,7 @@ ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vadd.vv v9, v8, v9, v0.t -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v9, (a0) ; CHECK-NEXT: addi a1, a1, -4 ; CHECK-NEXT: addi a0, a0, 16 @@ -3073,13 +3073,13 @@ ; CHECK-LABEL: sink_splat_vp_sub: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a3, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB49_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vsub.vx v8, v8, a1, v0.t -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: addi a3, a3, -4 ; CHECK-NEXT: addi a0, a0, 16 @@ -3111,13 +3111,13 @@ ; CHECK-LABEL: sink_splat_vp_rsub: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a3, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB50_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vrsub.vx v8, v8, 
a1, v0.t -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: addi a3, a3, -4 ; CHECK-NEXT: addi a0, a0, 16 @@ -3151,13 +3151,13 @@ ; CHECK-LABEL: sink_splat_vp_shl: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a3, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB51_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vsll.vx v8, v8, a1, v0.t -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: addi a3, a3, -4 ; CHECK-NEXT: addi a0, a0, 16 @@ -3191,13 +3191,13 @@ ; CHECK-LABEL: sink_splat_vp_lshr: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a3, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB52_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vsrl.vx v8, v8, a1, v0.t -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: addi a3, a3, -4 ; CHECK-NEXT: addi a0, a0, 16 @@ -3231,13 +3231,13 @@ ; CHECK-LABEL: sink_splat_vp_ashr: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a3, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB53_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vsra.vx v8, v8, a1, v0.t -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: addi a3, a3, -4 ; CHECK-NEXT: addi a0, a0, 16 @@ -3271,13 +3271,13 @@ ; CHECK-LABEL: sink_splat_vp_fmul: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB54_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: addi a2, a2, -4 ; CHECK-NEXT: addi a0, a0, 16 @@ -3311,13 +3311,13 @@ ; CHECK-LABEL: sink_splat_vp_fdiv: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB55_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: addi a2, a2, -4 ; CHECK-NEXT: addi a0, a0, 16 @@ -3349,13 +3349,13 @@ ; CHECK-LABEL: sink_splat_vp_frdiv: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB56_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v 
v8, (a0) ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: addi a2, a2, -4 ; CHECK-NEXT: addi a0, a0, 16 @@ -3389,13 +3389,13 @@ ; CHECK-LABEL: sink_splat_vp_fadd: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB57_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: addi a2, a2, -4 ; CHECK-NEXT: addi a0, a0, 16 @@ -3429,13 +3429,13 @@ ; CHECK-LABEL: sink_splat_vp_fsub: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB58_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: addi a2, a2, -4 ; CHECK-NEXT: addi a0, a0, 16 @@ -3469,13 +3469,13 @@ ; CHECK-LABEL: sink_splat_vp_frsub: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB59_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: addi a2, a2, -4 ; CHECK-NEXT: addi a0, a0, 16 @@ -3509,13 +3509,13 @@ ; CHECK-LABEL: sink_splat_vp_udiv: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a3, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB60_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vdivu.vx v8, v8, a1, v0.t -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: addi a3, a3, -4 ; CHECK-NEXT: addi a0, a0, 16 @@ -3549,13 +3549,13 @@ ; CHECK-LABEL: sink_splat_vp_sdiv: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a3, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB61_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vdiv.vx v8, v8, a1, v0.t -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: addi a3, a3, -4 ; CHECK-NEXT: addi a0, a0, 16 @@ -3589,13 +3589,13 @@ ; CHECK-LABEL: sink_splat_vp_urem: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a3, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: 
.LBB62_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vremu.vx v8, v8, a1, v0.t -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: addi a3, a3, -4 ; CHECK-NEXT: addi a0, a0, 16 @@ -3629,13 +3629,13 @@ ; CHECK-LABEL: sink_splat_vp_srem: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a3, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB63_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vrem.vx v8, v8, a1, v0.t -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: addi a3, a3, -4 ; CHECK-NEXT: addi a0, a0, 16 @@ -3668,7 +3668,7 @@ define void @sink_splat_vp_srem_commute(i32* nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: sink_splat_vp_srem_commute: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.x v8, a1 ; CHECK-NEXT: li a1, 1024 ; CHECK-NEXT: .LBB64_1: # %vector.body @@ -3676,7 +3676,7 @@ ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vrem.vv v9, v8, v9, v0.t -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v9, (a0) ; CHECK-NEXT: addi a1, a1, -4 ; CHECK-NEXT: addi a0, a0, 16 @@ -3710,14 +3710,14 @@ ; CHECK-LABEL: sink_splat_vp_fma: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a3, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB65_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: addi a3, a3, -4 ; CHECK-NEXT: addi a1, a1, 16 @@ -3753,14 +3753,14 @@ ; CHECK-LABEL: sink_splat_vp_fma_commute: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a3, 1024 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: .LBB66_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu ; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: addi a3, a3, -4 ; CHECK-NEXT: addi a1, a1, 16 @@ -3797,7 +3797,7 @@ ; CHECK-LABEL: sink_splat_mul_lmul2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: .LBB67_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle64.v v8, (a0) @@ -3833,7 +3833,7 @@ ; CHECK-LABEL: sink_splat_add_lmul2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: 
.LBB68_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle64.v v8, (a0) @@ -3869,7 +3869,7 @@ ; CHECK-LABEL: sink_splat_sub_lmul2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: .LBB69_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle64.v v8, (a0) @@ -3905,7 +3905,7 @@ ; CHECK-LABEL: sink_splat_rsub_lmul2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: .LBB70_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle64.v v8, (a0) @@ -3941,7 +3941,7 @@ ; CHECK-LABEL: sink_splat_and_lmul2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: .LBB71_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle64.v v8, (a0) @@ -3977,7 +3977,7 @@ ; CHECK-LABEL: sink_splat_or_lmul2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: .LBB72_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle64.v v8, (a0) @@ -4013,7 +4013,7 @@ ; CHECK-LABEL: sink_splat_xor_lmul2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: .LBB73_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle64.v v8, (a0) @@ -4052,7 +4052,7 @@ ; CHECK-NEXT: li a3, 32 ; CHECK-NEXT: .LBB74_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmul.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -4089,7 +4089,7 @@ ; CHECK-NEXT: li a3, 32 ; CHECK-NEXT: .LBB75_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vadd.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -4126,7 +4126,7 @@ ; CHECK-NEXT: li a3, 32 ; CHECK-NEXT: .LBB76_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vsub.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -4163,7 +4163,7 @@ ; CHECK-NEXT: li a3, 32 ; CHECK-NEXT: .LBB77_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vrsub.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -4200,7 +4200,7 @@ ; CHECK-NEXT: li a3, 32 ; CHECK-NEXT: .LBB78_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vand.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -4237,7 +4237,7 @@ ; CHECK-NEXT: li a3, 32 ; CHECK-NEXT: .LBB79_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vsetvli 
zero, a3, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vor.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -4274,7 +4274,7 @@ ; CHECK-NEXT: li a3, 32 ; CHECK-NEXT: .LBB80_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vxor.vx v8, v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) @@ -4308,7 +4308,7 @@ ; CHECK-LABEL: sink_splat_mul_lmulmf2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: .LBB81_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) @@ -4344,7 +4344,7 @@ ; CHECK-LABEL: sink_splat_add_lmulmf2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: .LBB82_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) @@ -4380,7 +4380,7 @@ ; CHECK-LABEL: sink_splat_sub_lmulmf2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: .LBB83_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) @@ -4416,7 +4416,7 @@ ; CHECK-LABEL: sink_splat_rsub_lmulmf2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: .LBB84_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) @@ -4452,7 +4452,7 @@ ; CHECK-LABEL: sink_splat_and_lmulmf2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: .LBB85_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) @@ -4488,7 +4488,7 @@ ; CHECK-LABEL: sink_splat_or_lmulmf2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: .LBB86_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) @@ -4524,7 +4524,7 @@ ; CHECK-LABEL: sink_splat_xor_lmulmf2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: .LBB87_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v8, (a0) diff --git a/llvm/test/CodeGen/RISCV/rvv/smulo-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/smulo-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/smulo-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/smulo-sdnode.ll @@ -6,7 +6,7 @@ define @smulo_nxv1i8( %x, %y) { ; CHECK-LABEL: smulo_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmulh.vv v10, v8, v9 ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: vsra.vi v9, v8, 7 @@ -25,7 +25,7 @@ define @smulo_nxv2i8( %x, %y) { ; CHECK-LABEL: smulo_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmulh.vv v10, 
v8, v9 ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: vsra.vi v9, v8, 7 @@ -44,7 +44,7 @@ define @smulo_nxv4i8( %x, %y) { ; CHECK-LABEL: smulo_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmulh.vv v10, v8, v9 ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: vsra.vi v9, v8, 7 @@ -63,7 +63,7 @@ define @smulo_nxv8i8( %x, %y) { ; CHECK-LABEL: smulo_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmulh.vv v10, v8, v9 ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: vsra.vi v9, v8, 7 @@ -82,7 +82,7 @@ define @smulo_nxv16i8( %x, %y) { ; CHECK-LABEL: smulo_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmulh.vv v12, v8, v10 ; CHECK-NEXT: vmul.vv v8, v8, v10 ; CHECK-NEXT: vsra.vi v10, v8, 7 @@ -101,7 +101,7 @@ define @smulo_nxv32i8( %x, %y) { ; CHECK-LABEL: smulo_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vmulh.vv v16, v8, v12 ; CHECK-NEXT: vmul.vv v8, v8, v12 ; CHECK-NEXT: vsra.vi v12, v8, 7 @@ -120,7 +120,7 @@ define @smulo_nxv64i8( %x, %y) { ; CHECK-LABEL: smulo_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vmulh.vv v24, v8, v16 ; CHECK-NEXT: vmul.vv v8, v8, v16 ; CHECK-NEXT: vsra.vi v16, v8, 7 @@ -139,7 +139,7 @@ define @smulo_nxv1i16( %x, %y) { ; CHECK-LABEL: smulo_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmulh.vv v10, v8, v9 ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: vsra.vi v9, v8, 15 @@ -158,7 +158,7 @@ define @smulo_nxv2i16( %x, %y) { ; CHECK-LABEL: smulo_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmulh.vv v10, v8, v9 ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: vsra.vi v9, v8, 15 @@ -177,7 +177,7 @@ define @smulo_nxv4i16( %x, %y) { ; CHECK-LABEL: smulo_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vmulh.vv v10, v8, v9 ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: vsra.vi v9, v8, 15 @@ -196,7 +196,7 @@ define @smulo_nxv8i16( %x, %y) { ; CHECK-LABEL: smulo_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmulh.vv v12, v8, v10 ; CHECK-NEXT: vmul.vv v8, v8, v10 ; CHECK-NEXT: vsra.vi v10, v8, 15 @@ -215,7 +215,7 @@ define @smulo_nxv16i16( %x, %y) { ; CHECK-LABEL: smulo_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vmulh.vv v16, v8, v12 ; CHECK-NEXT: vmul.vv v8, v8, v12 ; CHECK-NEXT: vsra.vi v12, v8, 15 @@ -234,7 +234,7 @@ define @smulo_nxv32i16( %x, %y) { ; CHECK-LABEL: smulo_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vmulh.vv v24, v8, v16 ; CHECK-NEXT: vmul.vv v8, v8, v16 ; CHECK-NEXT: vsra.vi v16, v8, 15 @@ -253,7 +253,7 @@ define @smulo_nxv1i32( %x, %y) { ; CHECK-LABEL: smulo_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, 
zero, e32, mf2, ta, ma ; CHECK-NEXT: vmulh.vv v10, v8, v9 ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: vsra.vi v9, v8, 31 @@ -272,7 +272,7 @@ define @smulo_nxv2i32( %x, %y) { ; CHECK-LABEL: smulo_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vmulh.vv v10, v8, v9 ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: vsra.vi v9, v8, 31 @@ -291,7 +291,7 @@ define @smulo_nxv4i32( %x, %y) { ; CHECK-LABEL: smulo_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vmulh.vv v12, v8, v10 ; CHECK-NEXT: vmul.vv v8, v8, v10 ; CHECK-NEXT: vsra.vi v10, v8, 31 @@ -310,7 +310,7 @@ define @smulo_nxv8i32( %x, %y) { ; CHECK-LABEL: smulo_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmulh.vv v16, v8, v12 ; CHECK-NEXT: vmul.vv v8, v8, v12 ; CHECK-NEXT: vsra.vi v12, v8, 31 @@ -329,7 +329,7 @@ define @smulo_nxv16i32( %x, %y) { ; CHECK-LABEL: smulo_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vmulh.vv v24, v8, v16 ; CHECK-NEXT: vmul.vv v8, v8, v16 ; CHECK-NEXT: vsra.vi v16, v8, 31 @@ -348,7 +348,7 @@ define @smulo_nxv1i64( %x, %y) { ; CHECK-LABEL: smulo_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vmulh.vv v10, v8, v9 ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: li a0, 63 @@ -368,7 +368,7 @@ define @smulo_nxv2i64( %x, %y) { ; CHECK-LABEL: smulo_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vmulh.vv v12, v8, v10 ; CHECK-NEXT: vmul.vv v8, v8, v10 ; CHECK-NEXT: li a0, 63 @@ -388,7 +388,7 @@ define @smulo_nxv4i64( %x, %y) { ; CHECK-LABEL: smulo_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vmulh.vv v16, v8, v12 ; CHECK-NEXT: vmul.vv v8, v8, v12 ; CHECK-NEXT: li a0, 63 @@ -408,7 +408,7 @@ define @smulo_nxv8i64( %x, %y) { ; CHECK-LABEL: smulo_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmulh.vv v24, v8, v16 ; CHECK-NEXT: vmul.vv v8, v8, v16 ; CHECK-NEXT: li a0, 63 diff --git a/llvm/test/CodeGen/RISCV/rvv/splat-vectors.ll b/llvm/test/CodeGen/RISCV/rvv/splat-vectors.ll --- a/llvm/test/CodeGen/RISCV/rvv/splat-vectors.ll +++ b/llvm/test/CodeGen/RISCV/rvv/splat-vectors.ll @@ -5,7 +5,7 @@ define @splat_c3_nxv4i32( %v) { ; CHECK-LABEL: splat_c3_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vrgather.vi v10, v8, 3 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -18,7 +18,7 @@ define @splat_idx_nxv4i32( %v, i64 %idx) { ; CHECK-LABEL: splat_idx_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vrgather.vx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -31,7 +31,7 @@ define @splat_c4_nxv8i16( %v) { ; CHECK-LABEL: splat_c4_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vrgather.vi v10, v8, 4 ; CHECK-NEXT: vmv.v.v v8, 
v10 ; CHECK-NEXT: ret @@ -44,7 +44,7 @@ define @splat_idx_nxv8i16( %v, i64 %idx) { ; CHECK-LABEL: splat_idx_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vrgather.vx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -57,7 +57,7 @@ define @splat_c1_nxv2f16( %v) { ; CHECK-LABEL: splat_c1_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vrgather.vi v9, v8, 1 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -70,7 +70,7 @@ define @splat_idx_nxv2f16( %v, i64 %idx) { ; CHECK-LABEL: splat_idx_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vrgather.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -83,7 +83,7 @@ define @splat_c3_nxv4f32( %v) { ; CHECK-LABEL: splat_c3_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vrgather.vi v10, v8, 3 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -96,7 +96,7 @@ define @splat_idx_nxv4f32( %v, i64 %idx) { ; CHECK-LABEL: splat_idx_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vrgather.vx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/splats-with-mixed-vl.ll b/llvm/test/CodeGen/RISCV/rvv/splats-with-mixed-vl.ll --- a/llvm/test/CodeGen/RISCV/rvv/splats-with-mixed-vl.ll +++ b/llvm/test/CodeGen/RISCV/rvv/splats-with-mixed-vl.ll @@ -4,7 +4,7 @@ define void @constant_splat_fixed(ptr %p) { ; CHECK-LABEL: constant_splat_fixed: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret @@ -15,7 +15,7 @@ define void @constant_splat_scalable(ptr %p) { ; CHECK-LABEL: constant_splat_scalable: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret @@ -28,10 +28,10 @@ define void @constant_splat_scalable_then_fixed(ptr %p, ptr %p2) { ; CHECK-LABEL: constant_splat_scalable_then_fixed: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vse32.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vse32.v v8, (a1) ; CHECK-NEXT: ret @@ -45,10 +45,10 @@ define void @constant_splat_fixed_then_scalable(ptr %p, ptr %p2) { ; CHECK-LABEL: constant_splat_fixed_then_scalable: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vse32.v v8, (a1) -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret @@ -60,7 +60,7 @@ define void @splat_scalable(ptr %p, i32 %v) { ; CHECK-LABEL: splat_scalable: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v8, a1 ; CHECK-NEXT: vse32.v v8, 
(a0) ; CHECK-NEXT: ret @@ -73,7 +73,7 @@ define void @splat_fixed(ptr %p, i32 %v) { ; CHECK-LABEL: splat_fixed: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.x v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret @@ -88,12 +88,12 @@ define void @mixed_splats1(ptr %p, i32 %v) { ; CHECK-LABEL: mixed_splats1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v8, a1 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vse32.v v9, (a0) -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %v, i32 0 @@ -107,10 +107,10 @@ define void @mixed_splats2(ptr %p, i32 %v) { ; CHECK-LABEL: mixed_splats2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret @@ -125,7 +125,7 @@ define void @extract_vector(ptr %p, i32 %v) { ; CHECK-LABEL: extract_vector: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.x v8, a1 ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret @@ -140,7 +140,7 @@ define void @extract_vector_multiuse1(ptr %p, ptr %p2, i32 %v) { ; CHECK-LABEL: extract_vector_multiuse1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.x v8, a2 ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: vse32.v v8, (a1) @@ -157,9 +157,9 @@ define @extract_vector_multiuse2(ptr %p, ptr %p2, i32 %v) { ; CHECK-LABEL: extract_vector_multiuse2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v8, a2 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %v, i32 0 @@ -173,11 +173,11 @@ define void @extract_vector_mixed1(ptr %p, ptr %p2, i32 %v) { ; CHECK-LABEL: extract_vector_mixed1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v8, a2 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vse32.v v8, (a1) ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %v, i32 0 @@ -193,10 +193,10 @@ define void @extract_vector_mixed2(ptr %p, ptr %p2, i32 %v) { ; CHECK-LABEL: extract_vector_mixed2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v8, a2 ; CHECK-NEXT: vse32.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a1) ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %v, i32 0 @@ -211,10 +211,10 @@ 
define void @extract_vector_mixed3(ptr %p, ptr %p2, i32 %v) { ; CHECK-LABEL: extract_vector_mixed3: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v8, a2 ; CHECK-NEXT: vse32.v v8, (a0) -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a1) ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %v, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/stepvector.ll b/llvm/test/CodeGen/RISCV/rvv/stepvector.ll --- a/llvm/test/CodeGen/RISCV/rvv/stepvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/stepvector.ll @@ -7,7 +7,7 @@ define @stepvector_nxv1i8() { ; CHECK-LABEL: stepvector_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv1i8() @@ -19,7 +19,7 @@ define @stepvector_nxv2i8() { ; CHECK-LABEL: stepvector_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv2i8() @@ -31,7 +31,7 @@ define @stepvector_nxv3i8() { ; CHECK-LABEL: stepvector_nxv3i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv3i8() @@ -43,7 +43,7 @@ define @stepvector_nxv4i8() { ; CHECK-LABEL: stepvector_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv4i8() @@ -55,7 +55,7 @@ define @stepvector_nxv8i8() { ; CHECK-LABEL: stepvector_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv8i8() @@ -65,7 +65,7 @@ define @add_stepvector_nxv8i8() { ; CHECK-LABEL: add_stepvector_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vadd.vv v8, v8, v8 ; CHECK-NEXT: ret @@ -79,7 +79,7 @@ define @mul_stepvector_nxv8i8() { ; CHECK-LABEL: mul_stepvector_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: li a0, 3 ; CHECK-NEXT: vmul.vx v8, v8, a0 @@ -95,7 +95,7 @@ define @shl_stepvector_nxv8i8() { ; CHECK-LABEL: shl_stepvector_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vsll.vi v8, v8, 2 ; CHECK-NEXT: ret @@ -112,7 +112,7 @@ define @stepvector_nxv16i8() { ; CHECK-LABEL: stepvector_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv16i8() @@ -124,7 +124,7 @@ define @stepvector_nxv32i8() { ; CHECK-LABEL: stepvector_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv32i8() @@ 
-136,7 +136,7 @@ define @stepvector_nxv64i8() { ; CHECK-LABEL: stepvector_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv64i8() @@ -148,7 +148,7 @@ define @stepvector_nxv1i16() { ; CHECK-LABEL: stepvector_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv1i16() @@ -160,7 +160,7 @@ define @stepvector_nxv2i16() { ; CHECK-LABEL: stepvector_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv2i16() @@ -172,7 +172,7 @@ define @stepvector_nxv2i15() { ; CHECK-LABEL: stepvector_nxv2i15: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv2i15() @@ -184,7 +184,7 @@ define @stepvector_nxv3i16() { ; CHECK-LABEL: stepvector_nxv3i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv3i16() @@ -196,7 +196,7 @@ define @stepvector_nxv4i16() { ; CHECK-LABEL: stepvector_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv4i16() @@ -208,7 +208,7 @@ define @stepvector_nxv8i16() { ; CHECK-LABEL: stepvector_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv8i16() @@ -220,7 +220,7 @@ define @stepvector_nxv16i16() { ; CHECK-LABEL: stepvector_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv16i16() @@ -230,7 +230,7 @@ define @add_stepvector_nxv16i16() { ; CHECK-LABEL: add_stepvector_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vadd.vv v8, v8, v8 ; CHECK-NEXT: ret @@ -244,7 +244,7 @@ define @mul_stepvector_nxv16i16() { ; CHECK-LABEL: mul_stepvector_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: li a0, 3 ; CHECK-NEXT: vmul.vx v8, v8, a0 @@ -260,7 +260,7 @@ define @shl_stepvector_nxv16i16() { ; CHECK-LABEL: shl_stepvector_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vsll.vi v8, v8, 2 ; CHECK-NEXT: ret @@ -277,7 +277,7 @@ define @stepvector_nxv32i16() { ; CHECK-LABEL: stepvector_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv32i16() @@ -289,7 +289,7 @@ 
define @stepvector_nxv1i32() { ; CHECK-LABEL: stepvector_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv1i32() @@ -301,7 +301,7 @@ define @stepvector_nxv2i32() { ; CHECK-LABEL: stepvector_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv2i32() @@ -313,7 +313,7 @@ define @stepvector_nxv3i32() { ; CHECK-LABEL: stepvector_nxv3i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv3i32() @@ -325,7 +325,7 @@ define @stepvector_nxv4i32() { ; CHECK-LABEL: stepvector_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv4i32() @@ -337,7 +337,7 @@ define @stepvector_nxv8i32() { ; CHECK-LABEL: stepvector_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv8i32() @@ -349,7 +349,7 @@ define @stepvector_nxv16i32() { ; CHECK-LABEL: stepvector_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv16i32() @@ -359,7 +359,7 @@ define @add_stepvector_nxv16i32() { ; CHECK-LABEL: add_stepvector_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vadd.vv v8, v8, v8 ; CHECK-NEXT: ret @@ -373,7 +373,7 @@ define @mul_stepvector_nxv16i32() { ; CHECK-LABEL: mul_stepvector_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: li a0, 3 ; CHECK-NEXT: vmul.vx v8, v8, a0 @@ -389,7 +389,7 @@ define @shl_stepvector_nxv16i32() { ; CHECK-LABEL: shl_stepvector_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vsll.vi v8, v8, 2 ; CHECK-NEXT: ret @@ -406,7 +406,7 @@ define @stepvector_nxv1i64() { ; CHECK-LABEL: stepvector_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv1i64() @@ -418,7 +418,7 @@ define @stepvector_nxv2i64() { ; CHECK-LABEL: stepvector_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv2i64() @@ -430,7 +430,7 @@ define @stepvector_nxv3i64() { ; CHECK-LABEL: stepvector_nxv3i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv3i64() @@ -442,7 +442,7 @@ define 
@stepvector_nxv4i64() { ; CHECK-LABEL: stepvector_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv4i64() @@ -454,7 +454,7 @@ define @stepvector_nxv8i64() { ; CHECK-LABEL: stepvector_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret %v = call @llvm.experimental.stepvector.nxv8i64() @@ -464,7 +464,7 @@ define @add_stepvector_nxv8i64() { ; CHECK-LABEL: add_stepvector_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vadd.vv v8, v8, v8 ; CHECK-NEXT: ret @@ -478,7 +478,7 @@ define @mul_stepvector_nxv8i64() { ; CHECK-LABEL: mul_stepvector_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: li a0, 3 ; CHECK-NEXT: vmul.vx v8, v8, a0 @@ -502,7 +502,7 @@ ; RV32-NEXT: addi a0, a0, -683 ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v8, (a0), zero ; RV32-NEXT: vid.v v16 ; RV32-NEXT: vmul.vv v8, v16, v8 @@ -511,7 +511,7 @@ ; ; RV64-LABEL: mul_bigimm_stepvector_nxv8i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; RV64-NEXT: vid.v v8 ; RV64-NEXT: lui a0, 1987 ; RV64-NEXT: addiw a0, a0, -731 @@ -530,7 +530,7 @@ define @shl_stepvector_nxv8i64() { ; CHECK-LABEL: shl_stepvector_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vsll.vi v8, v8, 2 ; CHECK-NEXT: ret @@ -553,7 +553,7 @@ ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vid.v v8 ; RV32-NEXT: vadd.vv v16, v8, v16 @@ -563,7 +563,7 @@ ; RV64-LABEL: stepvector_nxv16i64: ; RV64: # %bb.0: ; RV64-NEXT: csrr a0, vlenb -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vid.v v8 ; RV64-NEXT: vadd.vx v16, v8, a0 ; RV64-NEXT: ret @@ -580,7 +580,7 @@ ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 1 ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vid.v v8 @@ -593,7 +593,7 @@ ; RV64: # %bb.0: # %entry ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 1 -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vid.v v8 ; RV64-NEXT: vadd.vv v8, v8, v8 ; RV64-NEXT: vadd.vx v16, v8, a0 @@ -615,7 +615,7 @@ ; RV32-NEXT: slli a1, a0, 1 ; RV32-NEXT: add a0, a1, a0 ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vid.v v8 @@ -627,7 +627,7 @@ ; ; RV64-LABEL: mul_stepvector_nxv16i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, 
mu +; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; RV64-NEXT: vid.v v8 ; RV64-NEXT: li a0, 3 ; RV64-NEXT: vmul.vx v8, v8, a0 @@ -668,7 +668,7 @@ ; RV32-NEXT: add a0, a0, a1 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v8, (a0), zero ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vid.v v24 @@ -685,7 +685,7 @@ ; RV64-NEXT: slli a1, a1, 12 ; RV64-NEXT: addi a1, a1, -683 ; RV64-NEXT: mul a0, a0, a1 -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vid.v v8 ; RV64-NEXT: vmul.vx v8, v8, a1 ; RV64-NEXT: vadd.vx v16, v8, a0 @@ -707,7 +707,7 @@ ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 2 ; RV32-NEXT: sw a0, 8(sp) -; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; RV32-NEXT: addi a0, sp, 8 ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vid.v v8 @@ -720,7 +720,7 @@ ; RV64: # %bb.0: # %entry ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 2 -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vid.v v8 ; RV64-NEXT: vsll.vi v8, v8, 2 ; RV64-NEXT: vadd.vx v16, v8, a0 diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-load-store-intrinsics.ll b/llvm/test/CodeGen/RISCV/rvv/strided-load-store-intrinsics.ll --- a/llvm/test/CodeGen/RISCV/rvv/strided-load-store-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/rvv/strided-load-store-intrinsics.ll @@ -68,7 +68,7 @@ ; CHECK-LABEL: strided_store_i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 32 -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vsse8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret call void @llvm.riscv.masked.strided.store.v32i8.p0.i64(<32 x i8> %v, ptr %p, i64 %stride, <32 x i1> %m) @@ -79,7 +79,7 @@ ; CHECK-LABEL: strided_store_i8_zero: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsse8.v v8, (a0), zero, v0.t ; CHECK-NEXT: ret call void @llvm.riscv.masked.strided.store.v32i8.p0.i64(<32 x i8> %v, ptr %p, i64 0, <32 x i1> %m) @@ -91,7 +91,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: li a2, 1 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret call void @llvm.riscv.masked.strided.store.v32i8.p0.i64(<32 x i8> %v, ptr %p, i64 1, <32 x i1> %m) @@ -103,7 +103,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: li a2, -1 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsse8.v v8, (a0), a2, v0.t ; CHECK-NEXT: ret call void @llvm.riscv.masked.strided.store.v32i8.p0.i64(<32 x i8> %v, ptr %p, i64 -1, <32 x i1> %m) @@ -127,7 +127,7 @@ define void @strided_store_vscale_i64(ptr %p, %v, i64 %stride, %m) { ; CHECK-LABEL: strided_store_vscale_i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e64, m1, ta, ma ; CHECK-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret call void @llvm.riscv.masked.strided.store.nxv1i64.p0.i64( %v, ptr %p, i64 %stride, %m) diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll --- a/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll +++ b/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll @@ 
-61,13 +61,13 @@ define @strided_vpload_nxv1i8_i64_allones_mask(i8* %ptr, i64 signext %stride, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_nxv1i8_i64_allones_mask: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a3, e8, mf8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a3, e8, mf8, ta, ma ; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1 ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_nxv1i8_i64_allones_mask: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1 ; CHECK-RV64-NEXT: ret %a = insertelement poison, i1 true, i32 0 @@ -97,13 +97,13 @@ define @strided_vpload_nxv1i8_allones_mask(i8* %ptr, i32 signext %stride, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_nxv1i8_allones_mask: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1 ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_nxv1i8_allones_mask: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1 ; CHECK-RV64-NEXT: ret %a = insertelement poison, i1 true, i32 0 @@ -169,13 +169,13 @@ define @strided_vpload_nxv8i8_allones_mask(i8* %ptr, i32 signext %stride, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_nxv8i8_allones_mask: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1 ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_nxv8i8_allones_mask: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1 ; CHECK-RV64-NEXT: ret %a = insertelement poison, i1 true, i32 0 @@ -223,13 +223,13 @@ define @strided_vpload_nxv2i16_allones_mask(i16* %ptr, i32 signext %stride, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_nxv2i16_allones_mask: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1 ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_nxv2i16_allones_mask: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1 ; CHECK-RV64-NEXT: ret %a = insertelement poison, i1 true, i32 0 @@ -331,13 +331,13 @@ define @strided_vpload_nxv4i32_allones_mask(i32* %ptr, i32 signext %stride, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_nxv4i32_allones_mask: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1 ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_nxv4i32_allones_mask: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1 ; CHECK-RV64-NEXT: ret %a = insertelement poison, i1 true, i32 0 @@ -385,13 +385,13 @@ define @strided_vpload_nxv1i64_allones_mask(i64* %ptr, i32 signext %stride, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_nxv1i64_allones_mask: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, 
e64, m1, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1 ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_nxv1i64_allones_mask: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1 ; CHECK-RV64-NEXT: ret %a = insertelement poison, i1 true, i32 0 @@ -493,13 +493,13 @@ define @strided_vpload_nxv2f16_allones_mask(half* %ptr, i32 signext %stride, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_nxv2f16_allones_mask: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1 ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_nxv2f16_allones_mask: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1 ; CHECK-RV64-NEXT: ret %a = insertelement poison, i1 true, i32 0 @@ -619,13 +619,13 @@ define @strided_vpload_nxv8f32_allones_mask(float* %ptr, i32 signext %stride, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_nxv8f32_allones_mask: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m4, ta, ma ; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1 ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_nxv8f32_allones_mask: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m4, ta, ma ; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1 ; CHECK-RV64-NEXT: ret %a = insertelement poison, i1 true, i32 0 @@ -691,13 +691,13 @@ define @strided_vpload_nxv4f64_allones_mask(double* %ptr, i32 signext %stride, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_nxv4f64_allones_mask: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1 ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_nxv4f64_allones_mask: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1 ; CHECK-RV64-NEXT: ret %a = insertelement poison, i1 true, i32 0 @@ -744,13 +744,13 @@ define @strided_vpload_nxv3f64_allones_mask(double* %ptr, i32 signext %stride, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpload_nxv3f64_allones_mask: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1 ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpload_nxv3f64_allones_mask: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1 ; CHECK-RV64-NEXT: ret %one = insertelement poison, i1 true, i32 0 @@ -774,7 +774,7 @@ ; CHECK-RV32-NEXT: # %bb.1: ; CHECK-RV32-NEXT: mv a2, a6 ; CHECK-RV32-NEXT: .LBB42_2: -; CHECK-RV32-NEXT: vsetvli a6, zero, e8, mf4, ta, mu +; CHECK-RV32-NEXT: vsetvli a6, zero, e8, mf4, ta, ma ; CHECK-RV32-NEXT: vslidedown.vx v0, v8, a5 ; CHECK-RV32-NEXT: bltu a3, a4, .LBB42_4 ; CHECK-RV32-NEXT: # %bb.3: @@ -800,7 +800,7 @@ ; CHECK-RV64-NEXT: # %bb.1: ; CHECK-RV64-NEXT: mv a3, a6 ; CHECK-RV64-NEXT: .LBB42_2: -; 
CHECK-RV64-NEXT: vsetvli a6, zero, e8, mf4, ta, mu +; CHECK-RV64-NEXT: vsetvli a6, zero, e8, mf4, ta, ma ; CHECK-RV64-NEXT: vslidedown.vx v0, v8, a5 ; CHECK-RV64-NEXT: bltu a2, a4, .LBB42_4 ; CHECK-RV64-NEXT: # %bb.3: @@ -834,9 +834,9 @@ ; CHECK-RV32-NEXT: .LBB43_4: ; CHECK-RV32-NEXT: mul a4, a3, a1 ; CHECK-RV32-NEXT: add a4, a0, a4 -; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-RV32-NEXT: vlse64.v v16, (a4), a1 -; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1 ; CHECK-RV32-NEXT: ret ; @@ -855,9 +855,9 @@ ; CHECK-RV64-NEXT: .LBB43_4: ; CHECK-RV64-NEXT: mul a4, a2, a1 ; CHECK-RV64-NEXT: add a4, a0, a4 -; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; CHECK-RV64-NEXT: vlse64.v v16, (a4), a1 -; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1 ; CHECK-RV64-NEXT: ret %one = insertelement poison, i1 true, i32 0 @@ -890,7 +890,7 @@ ; CHECK-RV32-NEXT: mv t0, a6 ; CHECK-RV32-NEXT: .LBB44_4: ; CHECK-RV32-NEXT: srli a6, a2, 3 -; CHECK-RV32-NEXT: vsetvli t1, zero, e8, mf4, ta, mu +; CHECK-RV32-NEXT: vsetvli t1, zero, e8, mf4, ta, ma ; CHECK-RV32-NEXT: vslidedown.vx v0, v8, a6 ; CHECK-RV32-NEXT: mv a6, a5 ; CHECK-RV32-NEXT: bltu a5, a2, .LBB44_6 @@ -908,7 +908,7 @@ ; CHECK-RV32-NEXT: # %bb.7: ; CHECK-RV32-NEXT: mv t0, t1 ; CHECK-RV32-NEXT: .LBB44_8: -; CHECK-RV32-NEXT: vsetvli a3, zero, e8, mf2, ta, mu +; CHECK-RV32-NEXT: vsetvli a3, zero, e8, mf2, ta, ma ; CHECK-RV32-NEXT: vslidedown.vx v0, v8, a7 ; CHECK-RV32-NEXT: bltu t0, a2, .LBB44_10 ; CHECK-RV32-NEXT: # %bb.9: @@ -941,7 +941,7 @@ ; CHECK-RV64-NEXT: mv t0, a6 ; CHECK-RV64-NEXT: .LBB44_4: ; CHECK-RV64-NEXT: srli a6, a4, 3 -; CHECK-RV64-NEXT: vsetvli t1, zero, e8, mf4, ta, mu +; CHECK-RV64-NEXT: vsetvli t1, zero, e8, mf4, ta, ma ; CHECK-RV64-NEXT: vslidedown.vx v0, v8, a6 ; CHECK-RV64-NEXT: mv a6, a5 ; CHECK-RV64-NEXT: bltu a5, a4, .LBB44_6 @@ -959,7 +959,7 @@ ; CHECK-RV64-NEXT: # %bb.7: ; CHECK-RV64-NEXT: mv t0, t1 ; CHECK-RV64-NEXT: .LBB44_8: -; CHECK-RV64-NEXT: vsetvli a2, zero, e8, mf2, ta, mu +; CHECK-RV64-NEXT: vsetvli a2, zero, e8, mf2, ta, ma ; CHECK-RV64-NEXT: vslidedown.vx v0, v8, a7 ; CHECK-RV64-NEXT: bltu t0, a4, .LBB44_10 ; CHECK-RV64-NEXT: # %bb.9: diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll --- a/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll +++ b/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll @@ -9,13 +9,13 @@ define void @strided_vpstore_nxv1i8_i8( %val, i8* %ptr, i8 signext %stride, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_nxv1i8_i8: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-RV32-NEXT: vsse8.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_nxv1i8_i8: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-RV64-NEXT: vsse8.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.nxv1i8.p0i8.i8( %val, i8* %ptr, i8 %stride, %m, i32 %evl) @@ -27,13 +27,13 @@ define void @strided_vpstore_nxv1i8_i16( %val, i8* %ptr, i16 signext %stride, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: 
strided_vpstore_nxv1i8_i16: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-RV32-NEXT: vsse8.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_nxv1i8_i16: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-RV64-NEXT: vsse8.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.nxv1i8.p0i8.i16( %val, i8* %ptr, i16 %stride, %m, i32 %evl) @@ -45,13 +45,13 @@ define void @strided_vpstore_nxv1i8_i64( %val, i8* %ptr, i64 signext %stride, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_nxv1i8_i64: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a3, e8, mf8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a3, e8, mf8, ta, ma ; CHECK-RV32-NEXT: vsse8.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_nxv1i8_i64: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-RV64-NEXT: vsse8.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.nxv1i8.p0i8.i64( %val, i8* %ptr, i64 %stride, %m, i32 %evl) @@ -63,13 +63,13 @@ define void @strided_vpstore_nxv1i8( %val, i8* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_nxv1i8: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-RV32-NEXT: vsse8.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_nxv1i8: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-RV64-NEXT: vsse8.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.nxv1i8.p0i8.i32( %val, i8* %ptr, i32 %strided, %m, i32 %evl) @@ -81,13 +81,13 @@ define void @strided_vpstore_nxv2i8( %val, i8* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_nxv2i8: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-RV32-NEXT: vsse8.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_nxv2i8: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-RV64-NEXT: vsse8.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.nxv2i8.p0i8.i32( %val, i8* %ptr, i32 %strided, %m, i32 %evl) @@ -99,13 +99,13 @@ define void @strided_vpstore_nxv4i8( %val, i8* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_nxv4i8: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-RV32-NEXT: vsse8.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_nxv4i8: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-RV64-NEXT: vsse8.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.nxv4i8.p0i8.i32( %val, i8* %ptr, i32 %strided, %m, i32 %evl) @@ -117,13 +117,13 @@ define void @strided_vpstore_nxv8i8( %val, i8* %ptr, i32 signext %strided, 
%m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_nxv8i8: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-RV32-NEXT: vsse8.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_nxv8i8: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-RV64-NEXT: vsse8.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.nxv8i8.p0i8.i32( %val, i8* %ptr, i32 %strided, %m, i32 %evl) @@ -135,13 +135,13 @@ define void @strided_vpstore_nxv1i16( %val, i16* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_nxv1i16: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-RV32-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_nxv1i16: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-RV64-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.nxv1i16.p0i16.i32( %val, i16* %ptr, i32 %strided, %m, i32 %evl) @@ -153,13 +153,13 @@ define void @strided_vpstore_nxv2i16( %val, i16* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_nxv2i16: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-RV32-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_nxv2i16: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-RV64-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.nxv2i16.p0i16.i32( %val, i16* %ptr, i32 %strided, %m, i32 %evl) @@ -171,13 +171,13 @@ define void @strided_vpstore_nxv4i16( %val, i16* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_nxv4i16: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-RV32-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_nxv4i16: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-RV64-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.nxv4i16.p0i16.i32( %val, i16* %ptr, i32 %strided, %m, i32 %evl) @@ -189,13 +189,13 @@ define void @strided_vpstore_nxv8i16( %val, i16* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_nxv8i16: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-RV32-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_nxv8i16: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-RV64-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.nxv8i16.p0i16.i32( %val, i16* %ptr, i32 %strided, %m, i32 %evl) @@ -207,13 +207,13 @@ 
define void @strided_vpstore_nxv1i32( %val, i32* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_nxv1i32: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-RV32-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_nxv1i32: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-RV64-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.nxv1i32.p0i32.i32( %val, i32* %ptr, i32 %strided, %m, i32 %evl) @@ -225,13 +225,13 @@ define void @strided_vpstore_nxv2i32( %val, i32* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_nxv2i32: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-RV32-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_nxv2i32: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-RV64-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.nxv2i32.p0i32.i32( %val, i32* %ptr, i32 %strided, %m, i32 %evl) @@ -243,13 +243,13 @@ define void @strided_vpstore_nxv4i32( %val, i32* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_nxv4i32: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-RV32-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_nxv4i32: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-RV64-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.nxv4i32.p0i32.i32( %val, i32* %ptr, i32 %strided, %m, i32 %evl) @@ -261,13 +261,13 @@ define void @strided_vpstore_nxv8i32( %val, i32* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_nxv8i32: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m4, ta, ma ; CHECK-RV32-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_nxv8i32: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m4, ta, ma ; CHECK-RV64-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.nxv8i32.p0i32.i32( %val, i32* %ptr, i32 %strided, %m, i32 %evl) @@ -279,13 +279,13 @@ define void @strided_vpstore_nxv1i64( %val, i64* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_nxv1i64: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-RV32-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_nxv1i64: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-RV64-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void 
@llvm.experimental.vp.strided.store.nxv1i64.p0i64.i32( %val, i64* %ptr, i32 %strided, %m, i32 %evl) @@ -297,13 +297,13 @@ define void @strided_vpstore_nxv2i64( %val, i64* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_nxv2i64: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-RV32-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_nxv2i64: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-RV64-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.nxv2i64.p0i64.i32( %val, i64* %ptr, i32 %strided, %m, i32 %evl) @@ -315,13 +315,13 @@ define void @strided_vpstore_nxv4i64( %val, i64* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_nxv4i64: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-RV32-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_nxv4i64: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-RV64-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.nxv4i64.p0i64.i32( %val, i64* %ptr, i32 %strided, %m, i32 %evl) @@ -333,13 +333,13 @@ define void @strided_vpstore_nxv8i64( %val, i64* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_nxv8i64: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-RV32-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_nxv8i64: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-RV64-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.nxv8i64.p0i64.i32( %val, i64* %ptr, i32 %strided, %m, i32 %evl) @@ -351,13 +351,13 @@ define void @strided_vpstore_nxv1f16( %val, half* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_nxv1f16: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-RV32-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_nxv1f16: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-RV64-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.nxv1f16.p0f16.i32( %val, half* %ptr, i32 %strided, %m, i32 %evl) @@ -369,13 +369,13 @@ define void @strided_vpstore_nxv2f16( %val, half* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_nxv2f16: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-RV32-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_nxv2f16: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, 
mf2, ta, ma ; CHECK-RV64-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.nxv2f16.p0f16.i32( %val, half* %ptr, i32 %strided, %m, i32 %evl) @@ -387,13 +387,13 @@ define void @strided_vpstore_nxv4f16( %val, half* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_nxv4f16: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-RV32-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_nxv4f16: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-RV64-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.nxv4f16.p0f16.i32( %val, half* %ptr, i32 %strided, %m, i32 %evl) @@ -405,13 +405,13 @@ define void @strided_vpstore_nxv8f16( %val, half* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_nxv8f16: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-RV32-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_nxv8f16: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-RV64-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.nxv8f16.p0f16.i32( %val, half* %ptr, i32 %strided, %m, i32 %evl) @@ -423,13 +423,13 @@ define void @strided_vpstore_nxv1f32( %val, float* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_nxv1f32: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-RV32-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_nxv1f32: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-RV64-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.nxv1f32.p0f32.i32( %val, float* %ptr, i32 %strided, %m, i32 %evl) @@ -441,13 +441,13 @@ define void @strided_vpstore_nxv2f32( %val, float* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_nxv2f32: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-RV32-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_nxv2f32: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-RV64-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.nxv2f32.p0f32.i32( %val, float* %ptr, i32 %strided, %m, i32 %evl) @@ -459,13 +459,13 @@ define void @strided_vpstore_nxv4f32( %val, float* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_nxv4f32: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-RV32-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_nxv4f32: ; CHECK-RV64: # 
%bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-RV64-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.nxv4f32.p0f32.i32( %val, float* %ptr, i32 %strided, %m, i32 %evl) @@ -477,13 +477,13 @@ define void @strided_vpstore_nxv8f32( %val, float* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_nxv8f32: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m4, ta, ma ; CHECK-RV32-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_nxv8f32: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m4, ta, ma ; CHECK-RV64-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.nxv8f32.p0f32.i32( %val, float* %ptr, i32 %strided, %m, i32 %evl) @@ -495,13 +495,13 @@ define void @strided_vpstore_nxv1f64( %val, double* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_nxv1f64: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-RV32-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_nxv1f64: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-RV64-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.nxv1f64.p0f64.i32( %val, double* %ptr, i32 %strided, %m, i32 %evl) @@ -513,13 +513,13 @@ define void @strided_vpstore_nxv2f64( %val, double* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_nxv2f64: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-RV32-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_nxv2f64: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-RV64-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.nxv2f64.p0f64.i32( %val, double* %ptr, i32 %strided, %m, i32 %evl) @@ -531,13 +531,13 @@ define void @strided_vpstore_nxv4f64( %val, double* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_nxv4f64: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-RV32-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_nxv4f64: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-RV64-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.nxv4f64.p0f64.i32( %val, double* %ptr, i32 %strided, %m, i32 %evl) @@ -549,13 +549,13 @@ define void @strided_vpstore_nxv8f64( %val, double* %ptr, i32 signext %strided, %m, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_nxv8f64: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-RV32-NEXT: 
vsse64.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_nxv8f64: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-RV64-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.nxv8f64.p0f64.i32( %val, double* %ptr, i32 %strided, %m, i32 %evl) @@ -565,13 +565,13 @@ define void @strided_vpstore_nxv1i8_allones_mask( %val, i8* %ptr, i32 signext %strided, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_nxv1i8_allones_mask: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-RV32-NEXT: vsse8.v v8, (a0), a1 ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_nxv1i8_allones_mask: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-RV64-NEXT: vsse8.v v8, (a0), a1 ; CHECK-RV64-NEXT: ret %a = insertelement poison, i1 true, i32 0 @@ -584,13 +584,13 @@ define void @strided_vpstore_nxv3f32( %v, float *%ptr, i32 signext %stride, %mask, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_nxv3f32: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-RV32-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_nxv3f32: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-RV64-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.nxv3f32.p0f32.i32( %v, float* %ptr, i32 %stride, %mask, i32 %evl) @@ -600,13 +600,13 @@ define void @strided_vpstore_nxv3f32_allones_mask( %v, float *%ptr, i32 signext %stride, i32 zeroext %evl) { ; CHECK-RV32-LABEL: strided_vpstore_nxv3f32_allones_mask: ; CHECK-RV32: # %bb.0: -; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-RV32-NEXT: vsse32.v v8, (a0), a1 ; CHECK-RV32-NEXT: ret ; ; CHECK-RV64-LABEL: strided_vpstore_nxv3f32_allones_mask: ; CHECK-RV64: # %bb.0: -; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-RV64-NEXT: vsse32.v v8, (a0), a1 ; CHECK-RV64-NEXT: ret %one = insertelement poison, i1 true, i32 0 @@ -628,10 +628,10 @@ ; CHECK-RV32-NEXT: mv a4, a3 ; CHECK-RV32-NEXT: .LBB34_2: ; CHECK-RV32-NEXT: li a5, 0 -; CHECK-RV32-NEXT: vsetvli zero, a4, e64, m8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a4, e64, m8, ta, ma ; CHECK-RV32-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-RV32-NEXT: srli a6, a3, 3 -; CHECK-RV32-NEXT: vsetvli a7, zero, e8, mf4, ta, mu +; CHECK-RV32-NEXT: vsetvli a7, zero, e8, mf4, ta, ma ; CHECK-RV32-NEXT: sub a3, a2, a3 ; CHECK-RV32-NEXT: vslidedown.vx v0, v0, a6 ; CHECK-RV32-NEXT: bltu a2, a3, .LBB34_4 @@ -640,7 +640,7 @@ ; CHECK-RV32-NEXT: .LBB34_4: ; CHECK-RV32-NEXT: mul a2, a4, a1 ; CHECK-RV32-NEXT: add a0, a0, a2 -; CHECK-RV32-NEXT: vsetvli zero, a5, e64, m8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a5, e64, m8, ta, ma ; CHECK-RV32-NEXT: vsse64.v v16, (a0), a1, v0.t ; CHECK-RV32-NEXT: ret ; @@ -653,10 +653,10 @@ ; CHECK-RV64-NEXT: mv a4, a3 ; CHECK-RV64-NEXT: .LBB34_2: ; CHECK-RV64-NEXT: li a5, 0 -; CHECK-RV64-NEXT: vsetvli zero, a4, e64, m8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a4, e64, m8, ta, ma ; 
CHECK-RV64-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-RV64-NEXT: srli a6, a3, 3 -; CHECK-RV64-NEXT: vsetvli a7, zero, e8, mf4, ta, mu +; CHECK-RV64-NEXT: vsetvli a7, zero, e8, mf4, ta, ma ; CHECK-RV64-NEXT: sub a3, a2, a3 ; CHECK-RV64-NEXT: vslidedown.vx v0, v0, a6 ; CHECK-RV64-NEXT: bltu a2, a3, .LBB34_4 @@ -665,7 +665,7 @@ ; CHECK-RV64-NEXT: .LBB34_4: ; CHECK-RV64-NEXT: mul a2, a4, a1 ; CHECK-RV64-NEXT: add a0, a0, a2 -; CHECK-RV64-NEXT: vsetvli zero, a5, e64, m8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a5, e64, m8, ta, ma ; CHECK-RV64-NEXT: vsse64.v v16, (a0), a1, v0.t ; CHECK-RV64-NEXT: ret call void @llvm.experimental.vp.strided.store.nxv16f64.p0f64.i32( %v, double* %ptr, i32 %stride, %mask, i32 %evl) @@ -682,7 +682,7 @@ ; CHECK-RV32-NEXT: mv a3, a4 ; CHECK-RV32-NEXT: .LBB35_2: ; CHECK-RV32-NEXT: li a5, 0 -; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; CHECK-RV32-NEXT: sub a4, a2, a4 ; CHECK-RV32-NEXT: vsse64.v v8, (a0), a1 ; CHECK-RV32-NEXT: bltu a2, a4, .LBB35_4 @@ -691,7 +691,7 @@ ; CHECK-RV32-NEXT: .LBB35_4: ; CHECK-RV32-NEXT: mul a2, a3, a1 ; CHECK-RV32-NEXT: add a0, a0, a2 -; CHECK-RV32-NEXT: vsetvli zero, a5, e64, m8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a5, e64, m8, ta, ma ; CHECK-RV32-NEXT: vsse64.v v16, (a0), a1 ; CHECK-RV32-NEXT: ret ; @@ -704,7 +704,7 @@ ; CHECK-RV64-NEXT: mv a3, a4 ; CHECK-RV64-NEXT: .LBB35_2: ; CHECK-RV64-NEXT: li a5, 0 -; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; CHECK-RV64-NEXT: sub a4, a2, a4 ; CHECK-RV64-NEXT: vsse64.v v8, (a0), a1 ; CHECK-RV64-NEXT: bltu a2, a4, .LBB35_4 @@ -713,7 +713,7 @@ ; CHECK-RV64-NEXT: .LBB35_4: ; CHECK-RV64-NEXT: mul a2, a3, a1 ; CHECK-RV64-NEXT: add a0, a0, a2 -; CHECK-RV64-NEXT: vsetvli zero, a5, e64, m8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a5, e64, m8, ta, ma ; CHECK-RV64-NEXT: vsse64.v v16, (a0), a1 ; CHECK-RV64-NEXT: ret %one = insertelement poison, i1 true, i32 0 @@ -750,7 +750,7 @@ ; CHECK-RV32-NEXT: .LBB36_4: ; CHECK-RV32-NEXT: li t0, 0 ; CHECK-RV32-NEXT: vl8re64.v v16, (a0) -; CHECK-RV32-NEXT: vsetvli zero, a5, e64, m8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a5, e64, m8, ta, ma ; CHECK-RV32-NEXT: vmv1r.v v0, v24 ; CHECK-RV32-NEXT: vsse64.v v8, (a1), a2, v0.t ; CHECK-RV32-NEXT: sub a7, a3, a7 @@ -759,7 +759,7 @@ ; CHECK-RV32-NEXT: # %bb.5: ; CHECK-RV32-NEXT: mv t0, a7 ; CHECK-RV32-NEXT: .LBB36_6: -; CHECK-RV32-NEXT: vsetvli a3, zero, e8, mf2, ta, mu +; CHECK-RV32-NEXT: vsetvli a3, zero, e8, mf2, ta, ma ; CHECK-RV32-NEXT: vslidedown.vx v0, v24, a0 ; CHECK-RV32-NEXT: bltu t0, a4, .LBB36_8 ; CHECK-RV32-NEXT: # %bb.7: @@ -768,7 +768,7 @@ ; CHECK-RV32-NEXT: li a0, 0 ; CHECK-RV32-NEXT: mul a3, a6, a2 ; CHECK-RV32-NEXT: add a7, a1, a3 -; CHECK-RV32-NEXT: vsetvli zero, t0, e64, m8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, t0, e64, m8, ta, ma ; CHECK-RV32-NEXT: sub a3, a6, a4 ; CHECK-RV32-NEXT: vsse64.v v16, (a7), a2, v0.t ; CHECK-RV32-NEXT: bltu a6, a3, .LBB36_10 @@ -776,11 +776,11 @@ ; CHECK-RV32-NEXT: mv a0, a3 ; CHECK-RV32-NEXT: .LBB36_10: ; CHECK-RV32-NEXT: srli a3, a4, 3 -; CHECK-RV32-NEXT: vsetvli a4, zero, e8, mf4, ta, mu +; CHECK-RV32-NEXT: vsetvli a4, zero, e8, mf4, ta, ma ; CHECK-RV32-NEXT: vslidedown.vx v0, v24, a3 ; CHECK-RV32-NEXT: mul a3, a5, a2 ; CHECK-RV32-NEXT: add a1, a1, a3 -; CHECK-RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-RV32-NEXT: addi a0, sp, 16 ; CHECK-RV32-NEXT: vl8re8.v v8, (a0) # Unknown-size 
Folded Reload ; CHECK-RV32-NEXT: vsse64.v v8, (a1), a2, v0.t @@ -814,7 +814,7 @@ ; CHECK-RV64-NEXT: .LBB36_4: ; CHECK-RV64-NEXT: li t0, 0 ; CHECK-RV64-NEXT: vl8re64.v v16, (a0) -; CHECK-RV64-NEXT: vsetvli zero, a5, e64, m8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a5, e64, m8, ta, ma ; CHECK-RV64-NEXT: vmv1r.v v0, v24 ; CHECK-RV64-NEXT: vsse64.v v8, (a1), a2, v0.t ; CHECK-RV64-NEXT: sub a7, a3, a7 @@ -823,7 +823,7 @@ ; CHECK-RV64-NEXT: # %bb.5: ; CHECK-RV64-NEXT: mv t0, a7 ; CHECK-RV64-NEXT: .LBB36_6: -; CHECK-RV64-NEXT: vsetvli a3, zero, e8, mf2, ta, mu +; CHECK-RV64-NEXT: vsetvli a3, zero, e8, mf2, ta, ma ; CHECK-RV64-NEXT: vslidedown.vx v0, v24, a0 ; CHECK-RV64-NEXT: bltu t0, a4, .LBB36_8 ; CHECK-RV64-NEXT: # %bb.7: @@ -832,7 +832,7 @@ ; CHECK-RV64-NEXT: li a0, 0 ; CHECK-RV64-NEXT: mul a3, a6, a2 ; CHECK-RV64-NEXT: add a7, a1, a3 -; CHECK-RV64-NEXT: vsetvli zero, t0, e64, m8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, t0, e64, m8, ta, ma ; CHECK-RV64-NEXT: sub a3, a6, a4 ; CHECK-RV64-NEXT: vsse64.v v16, (a7), a2, v0.t ; CHECK-RV64-NEXT: bltu a6, a3, .LBB36_10 @@ -840,11 +840,11 @@ ; CHECK-RV64-NEXT: mv a0, a3 ; CHECK-RV64-NEXT: .LBB36_10: ; CHECK-RV64-NEXT: srli a3, a4, 3 -; CHECK-RV64-NEXT: vsetvli a4, zero, e8, mf4, ta, mu +; CHECK-RV64-NEXT: vsetvli a4, zero, e8, mf4, ta, ma ; CHECK-RV64-NEXT: vslidedown.vx v0, v24, a3 ; CHECK-RV64-NEXT: mul a3, a5, a2 ; CHECK-RV64-NEXT: add a1, a1, a3 -; CHECK-RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-RV64-NEXT: addi a0, sp, 16 ; CHECK-RV64-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload ; CHECK-RV64-NEXT: vsse64.v v8, (a1), a2, v0.t diff --git a/llvm/test/CodeGen/RISCV/rvv/umulo-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/umulo-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/umulo-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/umulo-sdnode.ll @@ -6,7 +6,7 @@ define @umulo_nxv1i8( %x, %y) { ; CHECK-LABEL: umulo_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmulhu.vv v10, v8, v9 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmul.vv v8, v8, v9 @@ -24,7 +24,7 @@ define @umulo_nxv2i8( %x, %y) { ; CHECK-LABEL: umulo_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmulhu.vv v10, v8, v9 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmul.vv v8, v8, v9 @@ -42,7 +42,7 @@ define @umulo_nxv4i8( %x, %y) { ; CHECK-LABEL: umulo_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmulhu.vv v10, v8, v9 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmul.vv v8, v8, v9 @@ -60,7 +60,7 @@ define @umulo_nxv8i8( %x, %y) { ; CHECK-LABEL: umulo_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmulhu.vv v10, v8, v9 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmul.vv v8, v8, v9 @@ -78,7 +78,7 @@ define @umulo_nxv16i8( %x, %y) { ; CHECK-LABEL: umulo_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmulhu.vv v12, v8, v10 ; CHECK-NEXT: vmsne.vi v0, v12, 0 ; CHECK-NEXT: vmul.vv v8, v8, v10 @@ -96,7 +96,7 @@ define @umulo_nxv32i8( %x, %y) { ; CHECK-LABEL: umulo_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma 
; CHECK-NEXT: vmulhu.vv v16, v8, v12 ; CHECK-NEXT: vmsne.vi v0, v16, 0 ; CHECK-NEXT: vmul.vv v8, v8, v12 @@ -114,7 +114,7 @@ define @umulo_nxv64i8( %x, %y) { ; CHECK-LABEL: umulo_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vmulhu.vv v24, v8, v16 ; CHECK-NEXT: vmsne.vi v0, v24, 0 ; CHECK-NEXT: vmul.vv v8, v8, v16 @@ -132,7 +132,7 @@ define @umulo_nxv1i16( %x, %y) { ; CHECK-LABEL: umulo_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmulhu.vv v10, v8, v9 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmul.vv v8, v8, v9 @@ -150,7 +150,7 @@ define @umulo_nxv2i16( %x, %y) { ; CHECK-LABEL: umulo_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmulhu.vv v10, v8, v9 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmul.vv v8, v8, v9 @@ -168,7 +168,7 @@ define @umulo_nxv4i16( %x, %y) { ; CHECK-LABEL: umulo_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vmulhu.vv v10, v8, v9 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmul.vv v8, v8, v9 @@ -186,7 +186,7 @@ define @umulo_nxv8i16( %x, %y) { ; CHECK-LABEL: umulo_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmulhu.vv v12, v8, v10 ; CHECK-NEXT: vmsne.vi v0, v12, 0 ; CHECK-NEXT: vmul.vv v8, v8, v10 @@ -204,7 +204,7 @@ define @umulo_nxv16i16( %x, %y) { ; CHECK-LABEL: umulo_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vmulhu.vv v16, v8, v12 ; CHECK-NEXT: vmsne.vi v0, v16, 0 ; CHECK-NEXT: vmul.vv v8, v8, v12 @@ -222,7 +222,7 @@ define @umulo_nxv32i16( %x, %y) { ; CHECK-LABEL: umulo_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vmulhu.vv v24, v8, v16 ; CHECK-NEXT: vmsne.vi v0, v24, 0 ; CHECK-NEXT: vmul.vv v8, v8, v16 @@ -240,7 +240,7 @@ define @umulo_nxv1i32( %x, %y) { ; CHECK-LABEL: umulo_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmulhu.vv v10, v8, v9 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmul.vv v8, v8, v9 @@ -258,7 +258,7 @@ define @umulo_nxv2i32( %x, %y) { ; CHECK-LABEL: umulo_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vmulhu.vv v10, v8, v9 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmul.vv v8, v8, v9 @@ -276,7 +276,7 @@ define @umulo_nxv4i32( %x, %y) { ; CHECK-LABEL: umulo_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vmulhu.vv v12, v8, v10 ; CHECK-NEXT: vmsne.vi v0, v12, 0 ; CHECK-NEXT: vmul.vv v8, v8, v10 @@ -294,7 +294,7 @@ define @umulo_nxv8i32( %x, %y) { ; CHECK-LABEL: umulo_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmulhu.vv v16, v8, v12 ; CHECK-NEXT: vmsne.vi v0, v16, 0 ; CHECK-NEXT: vmul.vv v8, v8, v12 @@ -312,7 +312,7 @@ define @umulo_nxv16i32( %x, %y) { ; CHECK-LABEL: umulo_nxv16i32: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vmulhu.vv v24, v8, v16 ; CHECK-NEXT: vmsne.vi v0, v24, 0 ; CHECK-NEXT: vmul.vv v8, v8, v16 @@ -330,7 +330,7 @@ define @umulo_nxv1i64( %x, %y) { ; CHECK-LABEL: umulo_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vmulhu.vv v10, v8, v9 ; CHECK-NEXT: vmsne.vi v0, v10, 0 ; CHECK-NEXT: vmul.vv v8, v8, v9 @@ -348,7 +348,7 @@ define @umulo_nxv2i64( %x, %y) { ; CHECK-LABEL: umulo_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vmulhu.vv v12, v8, v10 ; CHECK-NEXT: vmsne.vi v0, v12, 0 ; CHECK-NEXT: vmul.vv v8, v8, v10 @@ -366,7 +366,7 @@ define @umulo_nxv4i64( %x, %y) { ; CHECK-LABEL: umulo_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vmulhu.vv v16, v8, v12 ; CHECK-NEXT: vmsne.vi v0, v16, 0 ; CHECK-NEXT: vmul.vv v8, v8, v12 @@ -384,7 +384,7 @@ define @umulo_nxv8i64( %x, %y) { ; CHECK-LABEL: umulo_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmulhu.vv v24, v8, v16 ; CHECK-NEXT: vmsne.vi v0, v24, 0 ; CHECK-NEXT: vmul.vv v8, v8, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/unaligned-loads-stores.ll b/llvm/test/CodeGen/RISCV/rvv/unaligned-loads-stores.ll --- a/llvm/test/CodeGen/RISCV/rvv/unaligned-loads-stores.ll +++ b/llvm/test/CodeGen/RISCV/rvv/unaligned-loads-stores.ll @@ -7,7 +7,7 @@ define @unaligned_load_nxv1i32_a1(* %ptr) { ; CHECK-LABEL: unaligned_load_nxv1i32_a1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: ret %v = load , * %ptr, align 1 @@ -17,7 +17,7 @@ define @unaligned_load_nxv1i32_a2(* %ptr) { ; CHECK-LABEL: unaligned_load_nxv1i32_a2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: ret %v = load , * %ptr, align 2 @@ -27,7 +27,7 @@ define @aligned_load_nxv1i32_a4(* %ptr) { ; CHECK-LABEL: aligned_load_nxv1i32_a4: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: ret %v = load , * %ptr, align 4 @@ -92,7 +92,7 @@ define @unaligned_load_nxv1i1_a1(* %ptr) { ; CHECK-LABEL: unaligned_load_nxv1i1_a1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: ret %v = load , * %ptr, align 1 @@ -174,7 +174,7 @@ define void @unaligned_store_nxv1i16_a1( %x, * %ptr) { ; CHECK-LABEL: unaligned_store_nxv1i16_a1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret store %x, * %ptr, align 1 @@ -184,7 +184,7 @@ define void @aligned_store_nxv1i16_a2( %x, * %ptr) { ; CHECK-LABEL: aligned_store_nxv1i16_a2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret store %x, * %ptr, align 2 diff --git a/llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll b/llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll +++ b/llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll @@ -14,7 +14,7 @@ define @intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfmacc.vv v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -37,7 +37,7 @@ define @intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -60,7 +60,7 @@ define @intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfmsac.vv v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -83,7 +83,7 @@ define @intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -106,7 +106,7 @@ define @intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfnmacc.vv v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -129,7 +129,7 @@ define @intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -152,7 +152,7 @@ define @intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfnmsac.vv v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -175,7 +175,7 @@ define @intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfnmsub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfwmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -221,7 +221,7 @@ define @intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli 
zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfwmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -244,7 +244,7 @@ define @intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfwnmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -267,7 +267,7 @@ define @intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfwnmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -294,7 +294,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vmacc.vv v8, v9, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -302,7 +302,7 @@ ; ; RV64-LABEL: intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vmacc.vx v8, a0, v9 ; RV64-NEXT: ret entry: @@ -329,7 +329,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vmadd.vv v8, v10, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -337,7 +337,7 @@ ; ; RV64-LABEL: intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vmadd.vx v8, a0, v9 ; RV64-NEXT: ret entry: @@ -364,7 +364,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vnmsac.vv v8, v9, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -372,7 +372,7 @@ ; ; RV64-LABEL: intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vnmsac.vx v8, a0, v9 ; RV64-NEXT: ret entry: @@ -399,7 +399,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vnmsub.vv v8, v10, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -407,7 +407,7 @@ ; ; RV64-LABEL: intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vnmsub.vx v8, a0, v9 ; RV64-NEXT: ret entry: @@ -430,7 +430,7 @@ define @intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vwmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -453,7 +453,7 @@ define @intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vwmaccsu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -476,7 +476,7 @@ define @intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vwmaccu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -499,7 +499,7 @@ define @intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vwmaccus.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -521,7 +521,7 @@ define @intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -543,7 +543,7 @@ define @intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -565,7 +565,7 @@ define @intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -587,7 +587,7 @@ define @intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -609,7 +609,7 @@ define @intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -631,7 +631,7 @@ define @intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vredmin.vs v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -653,7 +653,7 @@ define @intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -675,7 +675,7 @@ define @intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, 
e8, mf8, ta, ma ; CHECK-NEXT: vredmax.vs v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -697,7 +697,7 @@ define @intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vwredsumu.vs v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -719,7 +719,7 @@ define @intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vwredsum.vs v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -741,7 +741,7 @@ define @intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -763,7 +763,7 @@ define @intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -785,7 +785,7 @@ define @intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -807,7 +807,7 @@ define @intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -829,7 +829,7 @@ define @intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -850,7 +850,7 @@ define @intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfwredusum.vs v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -873,7 +873,7 @@ define @intrinsic_vslidedown_vx_nxv1i8_nxv1i8( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -897,7 +897,7 @@ define @intrinsic_vslideup_vx_nxv1i8_nxv1i8( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, 
ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -920,14 +920,14 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v8, (a0), zero ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: intrinsic_vmv.s.x_x_nxv1i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v8, a0 ; RV64-NEXT: ret entry: @@ -940,7 +940,7 @@ define @intrinsic_vfmv.s.f_f_nxv1f16(half %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: @@ -957,7 +957,7 @@ define @intrinsic_vcompress_um_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vcompress_um_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vcompress.vm v9, v8, v0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll --- a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll @@ -12,7 +12,7 @@ define @intrinsic_vle_v_tu_nxv1i8_nxv1i8( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vle_v_tu_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -34,7 +34,7 @@ define @intrinsic_vlse_v_tu( %0, * %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_tu: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, tu, ma ; CHECK-NEXT: vlse8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -55,7 +55,7 @@ define @intrinsic_vleff_v_tu( %0, * %1, iXLen %2, iXLen* %3) nounwind { ; RV32-LABEL: intrinsic_vleff_v_tu: ; RV32: # %bb.0: # %entry -; RV32-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; RV32-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; RV32-NEXT: vle8ff.v v8, (a0) ; RV32-NEXT: csrr a0, vl ; RV32-NEXT: sw a0, 0(a2) @@ -63,7 +63,7 @@ ; ; RV64-LABEL: intrinsic_vleff_v_tu: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; RV64-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; RV64-NEXT: vle8ff.v v8, (a0) ; RV64-NEXT: csrr a0, vl ; RV64-NEXT: sd a0, 0(a2) @@ -88,7 +88,7 @@ define @intrinsic_vloxei_v_tu_nxv1i8_nxv1i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_tu_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vloxei8.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -110,7 +110,7 @@ define @intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vaadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -132,7 +132,7 @@ define @intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vaaddu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -154,7 +154,7 @@ define @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -175,7 +175,7 @@ define @intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vand.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -197,7 +197,7 @@ define @intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vasub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -219,7 +219,7 @@ define @intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vasubu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -241,7 +241,7 @@ define @intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vdiv.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -263,7 +263,7 @@ define @intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vdivu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -285,7 +285,7 @@ define @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -307,7 +307,7 @@ define @intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfdiv.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -329,7 +329,7 @@ define @intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfmax.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -351,7 +351,7 @@ define @intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfmin.vv v8, 
v9, v10 ; CHECK-NEXT: ret entry: @@ -373,7 +373,7 @@ define @intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfmul.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -395,7 +395,7 @@ define @intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfrdiv.vf v8, v9, fa0 ; CHECK-NEXT: ret entry: @@ -417,7 +417,7 @@ define @intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfsgnj.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -439,7 +439,7 @@ define @intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfsgnjn.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -461,7 +461,7 @@ define @intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfsgnjx.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -483,7 +483,7 @@ define @intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfrsub.vf v8, v9, fa0 ; CHECK-NEXT: ret entry: @@ -505,7 +505,7 @@ define @intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfslide1down.vf v8, v9, fa0 ; CHECK-NEXT: ret entry: @@ -527,7 +527,7 @@ define @intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfslide1up.vf v8, v9, fa0 ; CHECK-NEXT: ret entry: @@ -549,7 +549,7 @@ define @intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfwsub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -571,7 +571,7 @@ define @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: 
vfwsub.wv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -594,7 +594,7 @@ ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vl4re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma ; CHECK-NEXT: vfwsub.wv v8, v16, v24 ; CHECK-NEXT: ret entry: @@ -616,7 +616,7 @@ define @intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfwmul.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -638,7 +638,7 @@ define @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfwadd.wv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -660,7 +660,7 @@ define @intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfwadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -682,7 +682,7 @@ define @intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfsub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -707,7 +707,7 @@ ; RV32: # %bb.0: # %entry ; RV32-NEXT: vsetvli a2, a2, e64, m1, ta, mu ; RV32-NEXT: slli a2, a2, 1 -; RV32-NEXT: vsetvli zero, a2, e32, m1, tu, mu +; RV32-NEXT: vsetvli zero, a2, e32, m1, tu, ma ; RV32-NEXT: vmv1r.v v10, v8 ; RV32-NEXT: vslide1down.vx v10, v9, a0 ; RV32-NEXT: vslide1down.vx v8, v10, a1 @@ -715,7 +715,7 @@ ; ; RV64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma ; RV64-NEXT: vslide1down.vx v8, v9, a0 ; RV64-NEXT: ret entry: @@ -739,7 +739,7 @@ ; RV32: # %bb.0: # %entry ; RV32-NEXT: vsetvli a2, a2, e64, m1, ta, mu ; RV32-NEXT: slli a2, a2, 1 -; RV32-NEXT: vsetvli zero, a2, e32, m1, tu, mu +; RV32-NEXT: vsetvli zero, a2, e32, m1, tu, ma ; RV32-NEXT: vmv1r.v v10, v8 ; RV32-NEXT: vslide1up.vx v10, v9, a1 ; RV32-NEXT: vslide1up.vx v8, v10, a0 @@ -747,7 +747,7 @@ ; ; RV64-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma ; RV64-NEXT: vslide1up.vx v8, v9, a0 ; RV64-NEXT: ret entry: @@ -769,7 +769,7 @@ define @intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vmax.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -791,7 +791,7 @@ define @intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, 
mf8, tu, ma ; CHECK-NEXT: vmaxu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -813,7 +813,7 @@ define @intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vmin.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -835,7 +835,7 @@ define @intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vminu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -857,7 +857,7 @@ define @intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vmul.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -879,7 +879,7 @@ define @intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vmulh.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -901,7 +901,7 @@ define @intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vmulhsu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -923,7 +923,7 @@ define @intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vmulhu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -945,7 +945,7 @@ define @intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vnclip.wv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -967,7 +967,7 @@ define @intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vnclipu.wv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -989,7 +989,7 @@ define @intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vnsra.wv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -1011,7 +1011,7 @@ define @intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vnsrl.wv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -1033,7 +1033,7 @@ define 
@intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vor.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -1055,7 +1055,7 @@ define @intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vrem.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -1082,7 +1082,7 @@ define @intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vrgather.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -1104,7 +1104,7 @@ define @intrinsic_vrgather_vx_nxv1i8_nxv1i8( %0, %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vrgather.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1126,7 +1126,7 @@ define @intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vrgatherei16.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -1152,16 +1152,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, ma ; RV32-NEXT: vsub.vv v8, v10, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma ; RV64-NEXT: vrsub.vx v8, v9, a0 ; RV64-NEXT: ret entry: @@ -1187,16 +1187,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, ma ; RV32-NEXT: vsadd.vv v8, v9, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma ; RV64-NEXT: vsadd.vx v8, v9, a0 ; RV64-NEXT: ret entry: @@ -1218,7 +1218,7 @@ define @intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vsaddu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -1240,7 +1240,7 @@ define @intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vsll.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -1262,7 +1262,7 @@ define @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vsmul.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -1288,16 +1288,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, ma ; RV32-NEXT: vsmul.vv v8, v9, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma ; RV64-NEXT: vsmul.vx v8, v9, a0 ; RV64-NEXT: ret entry: @@ -1319,7 +1319,7 @@ define @intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vsra.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -1340,7 +1340,7 @@ define @intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vsrl.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -1362,7 +1362,7 @@ define @intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vssra.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -1384,7 +1384,7 @@ define @intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vssrl.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -1406,7 +1406,7 @@ define @intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vssub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -1428,7 +1428,7 @@ define @intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vssubu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -1454,16 +1454,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; RV32-NEXT: vsetvli zero, zero, e64, m1, 
tu, ma ; RV32-NEXT: vssub.vv v8, v9, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: intrinsic_vssub_vx_nxv1i64_nxv1i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma ; RV64-NEXT: vssub.vx v8, v9, a0 ; RV64-NEXT: ret entry: @@ -1489,16 +1489,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, ma ; RV32-NEXT: vssubu.vv v8, v9, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma ; RV64-NEXT: vssubu.vx v8, v9, a0 ; RV64-NEXT: ret entry: @@ -1520,7 +1520,7 @@ define @intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vsub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -1542,7 +1542,7 @@ define @intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vwadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -1564,7 +1564,7 @@ define @intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vwadd.wv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -1586,7 +1586,7 @@ define @intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vwaddu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -1608,7 +1608,7 @@ define @intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vwmul.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -1630,7 +1630,7 @@ define @intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vwmulu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -1652,7 +1652,7 @@ define @intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vwmulsu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -1674,7 +1674,7 @@ define @intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vwsub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -1696,7 +1696,7 @@ define @intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vwsub.wv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -1712,7 +1712,7 @@ define @intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8_tied( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8_tied: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vwsub.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -1734,7 +1734,7 @@ define @intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vwsubu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -1756,7 +1756,7 @@ define @intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vwsubu.wv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -1778,7 +1778,7 @@ define @intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vxor.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -1799,7 +1799,7 @@ define @intrinsic_vsext_vf8_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vsext.vf8 v8, v9 ; CHECK-NEXT: ret entry: @@ -1819,7 +1819,7 @@ define @intrinsic_vzext_vf8_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vzext.vf8 v8, v9 ; CHECK-NEXT: ret entry: @@ -1839,7 +1839,7 @@ define @intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfncvt.x.f.w v8, v9 ; CHECK-NEXT: ret entry: @@ -1858,7 +1858,7 @@ define @intrinsic_vid_v_nxv1i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret entry: @@ -1877,7 +1877,7 @@ define @intrinsic_vfclass_v_nxv1i16_nxv1f16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfclass.v v8, v9 ; CHECK-NEXT: ret %0, @@ -1900,7 
+1900,7 @@ define @intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v9 ; CHECK-NEXT: ret entry: @@ -1920,7 +1920,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v9 ; CHECK-NEXT: ret entry: @@ -1940,7 +1940,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v9 ; CHECK-NEXT: ret entry: @@ -1960,7 +1960,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v9 ; CHECK-NEXT: ret entry: @@ -1980,7 +1980,7 @@ define @intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfcvt.x.f.v v8, v9 ; CHECK-NEXT: ret entry: @@ -2000,7 +2000,7 @@ define @intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfncvt.f.f.w v8, v9 ; CHECK-NEXT: ret entry: @@ -2020,7 +2020,7 @@ define @intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfcvt.xu.f.v v8, v9 ; CHECK-NEXT: ret entry: @@ -2040,7 +2040,7 @@ define @intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfncvt.f.x.w v8, v9 ; CHECK-NEXT: ret entry: @@ -2060,7 +2060,7 @@ define @intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfncvt.f.xu.w v8, v9 ; CHECK-NEXT: ret entry: @@ -2080,7 +2080,7 @@ define @intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfncvt.rod.f.f.w v8, v9 ; CHECK-NEXT: ret entry: @@ -2100,7 +2100,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9 ; CHECK-NEXT: ret entry: @@ -2120,7 +2120,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v9 ; CHECK-NEXT: ret entry: @@ -2140,7 +2140,7 @@ define @intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vfncvt.x.f.w v8, v9 ; CHECK-NEXT: ret entry: @@ -2160,7 +2160,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vfncvt.xu.f.w v8, v9 ; CHECK-NEXT: ret entry: @@ -2180,7 +2180,7 @@ define @intrinsic_vfrec7_v_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfrec7.v v8, v9 ; CHECK-NEXT: ret entry: @@ -2200,7 +2200,7 @@ define @intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfrsqrt7.v v8, v9 ; CHECK-NEXT: ret entry: @@ -2220,7 +2220,7 @@ define @intrinsic_vfsqrt_v_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfsqrt.v v8, v9 ; CHECK-NEXT: ret entry: @@ -2240,7 +2240,7 @@ define @intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfwcvt.f.f.v v8, v9 ; CHECK-NEXT: ret entry: @@ -2260,7 +2260,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vfwcvt.f.x.v v8, v9 ; CHECK-NEXT: ret entry: @@ -2280,7 +2280,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vfwcvt.f.xu.v v8, v9 ; CHECK-NEXT: ret entry: @@ -2300,7 +2300,7 @@ define @intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; 
CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v9 ; CHECK-NEXT: ret entry: @@ -2320,7 +2320,7 @@ define @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v9 ; CHECK-NEXT: ret entry: @@ -2340,7 +2340,7 @@ define @intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfwcvt.x.f.v v8, v9 ; CHECK-NEXT: ret entry: @@ -2360,7 +2360,7 @@ define @intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfwcvt.xu.f.v v8, v9 ; CHECK-NEXT: ret entry: @@ -2380,7 +2380,7 @@ define @intrinsic_viota_m_nxv1i8_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv1i8_nxv1i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: viota.m v8, v0 ; CHECK-NEXT: ret entry: @@ -2402,7 +2402,7 @@ define @intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vadc.vvm v8, v9, v10, v0 ; CHECK-NEXT: ret entry: @@ -2426,7 +2426,7 @@ define @intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vsbc.vvm v8, v9, v10, v0 ; CHECK-NEXT: ret entry: @@ -2450,7 +2450,7 @@ define @intrinsic_vmerge_vvm_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0 ; CHECK-NEXT: ret entry: @@ -2478,16 +2478,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v24, (a0), zero -; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, mu +; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, ma ; RV32-NEXT: vmerge.vvm v8, v16, v24, v0 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: intrinsic_vmerge_vxm_nxv8i64_nxv8i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, tu, ma ; RV64-NEXT: vmerge.vxm v8, v16, a0, v0 ; RV64-NEXT: ret entry: @@ -2510,9 +2510,9 @@ ; RV32-NEXT: li a1, -1 ; RV32-NEXT: sw a1, 8(sp) ; RV32-NEXT: addi a1, sp, 8 -; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v24, (a1), zero -; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, mu +; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, ma ; RV32-NEXT: vmerge.vvm v8, v16, v24, v0 ; RV32-NEXT: addi sp, sp, 
16 ; RV32-NEXT: ret @@ -2521,7 +2521,7 @@ ; RV64: # %bb.0: # %entry ; RV64-NEXT: li a1, -1 ; RV64-NEXT: srli a1, a1, 28 -; RV64-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; RV64-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; RV64-NEXT: vmerge.vxm v8, v16, a1, v0 ; RV64-NEXT: ret entry: @@ -2545,7 +2545,7 @@ define @intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vfmerge.vfm v8, v16, fa0, v0 ; CHECK-NEXT: ret entry: @@ -2569,7 +2569,7 @@ define @intrinsic_vfmerge_vvm_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0 ; CHECK-NEXT: ret entry: @@ -2593,7 +2593,7 @@ define @intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vmerge.vim v8, v9, 0, v0 ; CHECK-NEXT: ret entry: @@ -2615,7 +2615,7 @@ define @intrinsic_vmv.v.v_v_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: @@ -2635,7 +2635,7 @@ define @intrinsic_vmv.v.v_v_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: @@ -2659,14 +2659,14 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, ma ; RV32-NEXT: vlse64.v v8, (a0), zero ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: intrinsic_vmv.v.x_x_nxv1i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma ; RV64-NEXT: vmv.v.x v8, a0 ; RV64-NEXT: ret entry: @@ -2686,7 +2686,7 @@ define @intrinsic_vfmv.v.f_f_nxv1f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfmv.v.f v8, fa0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/urem-seteq-vec.ll b/llvm/test/CodeGen/RISCV/rvv/urem-seteq-vec.ll --- a/llvm/test/CodeGen/RISCV/rvv/urem-seteq-vec.ll +++ b/llvm/test/CodeGen/RISCV/rvv/urem-seteq-vec.ll @@ -7,7 +7,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 1048571 ; RV32-NEXT: addi a0, a0, -1365 -; RV32-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: vsll.vi v9, v8, 15 ; RV32-NEXT: vsrl.vi v8, v8, 1 @@ -23,7 +23,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 1048571 ; RV64-NEXT: addiw a0, a0, -1365 -; RV64-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; 
RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: vsll.vi v9, v8, 15 ; RV64-NEXT: vsrl.vi v8, v8, 1 @@ -49,7 +49,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 1048573 ; RV32-NEXT: addi a0, a0, -819 -; RV32-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; RV32-NEXT: vmul.vx v8, v8, a0 ; RV32-NEXT: lui a0, 3 ; RV32-NEXT: addi a0, a0, 819 @@ -62,7 +62,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 1048573 ; RV64-NEXT: addiw a0, a0, -819 -; RV64-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: lui a0, 3 ; RV64-NEXT: addiw a0, a0, 819 @@ -84,7 +84,7 @@ ; RV32-LABEL: test_urem_vec_even_divisor_eq1: ; RV32: # %bb.0: ; RV32-NEXT: li a0, 1 -; RV32-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; RV32-NEXT: vsub.vx v8, v8, a0 ; RV32-NEXT: lui a0, 1048571 ; RV32-NEXT: addi a0, a0, -1365 @@ -102,7 +102,7 @@ ; RV64-LABEL: test_urem_vec_even_divisor_eq1: ; RV64: # %bb.0: ; RV64-NEXT: li a0, 1 -; RV64-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; RV64-NEXT: vsub.vx v8, v8, a0 ; RV64-NEXT: lui a0, 1048571 ; RV64-NEXT: addiw a0, a0, -1365 @@ -130,7 +130,7 @@ ; RV32-LABEL: test_urem_vec_odd_divisor_eq1: ; RV32: # %bb.0: ; RV32-NEXT: li a0, 1 -; RV32-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; RV32-NEXT: vsub.vx v8, v8, a0 ; RV32-NEXT: lui a0, 1048573 ; RV32-NEXT: addi a0, a0, -819 @@ -145,7 +145,7 @@ ; RV64-LABEL: test_urem_vec_odd_divisor_eq1: ; RV64: # %bb.0: ; RV64-NEXT: li a0, 1 -; RV64-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; RV64-NEXT: vsub.vx v8, v8, a0 ; RV64-NEXT: lui a0, 1048573 ; RV64-NEXT: addiw a0, a0, -819 diff --git a/llvm/test/CodeGen/RISCV/rvv/vaadd.ll b/llvm/test/CodeGen/RISCV/rvv/vaadd.ll --- a/llvm/test/CodeGen/RISCV/rvv/vaadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaadd.ll @@ -12,7 +12,7 @@ define @intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vaadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define @intrinsic_vaadd_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vaadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vaadd_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vaadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -150,7 +150,7 @@ define @intrinsic_vaadd_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vaadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -196,7 +196,7 @@ define @intrinsic_vaadd_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vaadd.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -242,7 +242,7 @@ define @intrinsic_vaadd_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vaadd.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -288,7 +288,7 @@ define @intrinsic_vaadd_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vaadd.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -335,7 +335,7 @@ define @intrinsic_vaadd_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vaadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -381,7 +381,7 @@ define @intrinsic_vaadd_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vaadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -427,7 +427,7 @@ define @intrinsic_vaadd_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vaadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -473,7 +473,7 @@ define @intrinsic_vaadd_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vaadd.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -519,7 +519,7 @@ define @intrinsic_vaadd_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vaadd.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -565,7 +565,7 @@ define @intrinsic_vaadd_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vaadd.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -612,7 +612,7 @@ define @intrinsic_vaadd_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vaadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -658,7 +658,7 @@ define @intrinsic_vaadd_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: 
vaadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -704,7 +704,7 @@ define @intrinsic_vaadd_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vaadd.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -750,7 +750,7 @@ define @intrinsic_vaadd_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vaadd.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -796,7 +796,7 @@ define @intrinsic_vaadd_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vaadd.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -843,7 +843,7 @@ define @intrinsic_vaadd_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vaadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -889,7 +889,7 @@ define @intrinsic_vaadd_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vaadd.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -935,7 +935,7 @@ define @intrinsic_vaadd_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vaadd.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -981,7 +981,7 @@ define @intrinsic_vaadd_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vaadd.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1028,7 +1028,7 @@ define @intrinsic_vaadd_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vaadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1074,7 +1074,7 @@ define @intrinsic_vaadd_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vaadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1120,7 +1120,7 @@ define @intrinsic_vaadd_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vaadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1166,7 +1166,7 @@ define @intrinsic_vaadd_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen 
%2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vaadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1212,7 +1212,7 @@ define @intrinsic_vaadd_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vaadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1258,7 +1258,7 @@ define @intrinsic_vaadd_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vaadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1304,7 +1304,7 @@ define @intrinsic_vaadd_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vaadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1350,7 +1350,7 @@ define @intrinsic_vaadd_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vaadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1396,7 +1396,7 @@ define @intrinsic_vaadd_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vaadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1442,7 +1442,7 @@ define @intrinsic_vaadd_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vaadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1488,7 +1488,7 @@ define @intrinsic_vaadd_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vaadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1534,7 +1534,7 @@ define @intrinsic_vaadd_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vaadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1580,7 +1580,7 @@ define @intrinsic_vaadd_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vaadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1626,7 +1626,7 @@ define @intrinsic_vaadd_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, 
ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vaadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1672,7 +1672,7 @@ define @intrinsic_vaadd_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vaadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1718,7 +1718,7 @@ define @intrinsic_vaadd_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vaadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1764,7 +1764,7 @@ define @intrinsic_vaadd_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vaadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1810,7 +1810,7 @@ define @intrinsic_vaadd_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaadd_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vaadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1860,7 +1860,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vaadd.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -1868,7 +1868,7 @@ ; ; RV64-LABEL: intrinsic_vaadd_vx_nxv1i64_nxv1i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vaadd.vx v8, v8, a0 ; RV64-NEXT: ret entry: @@ -1930,7 +1930,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vaadd.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -1938,7 +1938,7 @@ ; ; RV64-LABEL: intrinsic_vaadd_vx_nxv2i64_nxv2i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vaadd.vx v8, v8, a0 ; RV64-NEXT: ret entry: @@ -2000,7 +2000,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vaadd.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -2008,7 +2008,7 @@ ; ; RV64-LABEL: intrinsic_vaadd_vx_nxv4i64_nxv4i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vaadd.vx v8, v8, a0 ; RV64-NEXT: ret entry: @@ -2070,7 +2070,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vaadd.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -2078,7 +2078,7 @@ ; ; RV64-LABEL: intrinsic_vaadd_vx_nxv8i64_nxv8i64_i64: ; RV64: # %bb.0: # 
%entry -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vaadd.vx v8, v8, a0 ; RV64-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vaaddu.ll b/llvm/test/CodeGen/RISCV/rvv/vaaddu.ll --- a/llvm/test/CodeGen/RISCV/rvv/vaaddu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaaddu.ll @@ -12,7 +12,7 @@ define @intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vaaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define @intrinsic_vaaddu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vaaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vaaddu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vaaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -150,7 +150,7 @@ define @intrinsic_vaaddu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vaaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -196,7 +196,7 @@ define @intrinsic_vaaddu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vaaddu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -242,7 +242,7 @@ define @intrinsic_vaaddu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vaaddu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -288,7 +288,7 @@ define @intrinsic_vaaddu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vaaddu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -335,7 +335,7 @@ define @intrinsic_vaaddu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vaaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -381,7 +381,7 @@ define @intrinsic_vaaddu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vaaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -427,7 +427,7 @@ define @intrinsic_vaaddu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vaaddu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vaaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -473,7 +473,7 @@ define @intrinsic_vaaddu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vaaddu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -519,7 +519,7 @@ define @intrinsic_vaaddu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vaaddu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -565,7 +565,7 @@ define @intrinsic_vaaddu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vaaddu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -612,7 +612,7 @@ define @intrinsic_vaaddu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vaaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -658,7 +658,7 @@ define @intrinsic_vaaddu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vaaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -704,7 +704,7 @@ define @intrinsic_vaaddu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vaaddu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -750,7 +750,7 @@ define @intrinsic_vaaddu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vaaddu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -796,7 +796,7 @@ define @intrinsic_vaaddu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vaaddu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -843,7 +843,7 @@ define @intrinsic_vaaddu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vaaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -889,7 +889,7 @@ define @intrinsic_vaaddu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vaaddu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -935,7 +935,7 @@ define @intrinsic_vaaddu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vaaddu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -981,7 +981,7 @@ define @intrinsic_vaaddu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vaaddu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1028,7 +1028,7 @@ define @intrinsic_vaaddu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vaaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1074,7 +1074,7 @@ define @intrinsic_vaaddu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vaaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1120,7 +1120,7 @@ define @intrinsic_vaaddu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vaaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1166,7 +1166,7 @@ define @intrinsic_vaaddu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vaaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1212,7 +1212,7 @@ define @intrinsic_vaaddu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vaaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1258,7 +1258,7 @@ define @intrinsic_vaaddu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vaaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1304,7 +1304,7 @@ define @intrinsic_vaaddu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vaaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1350,7 +1350,7 @@ define @intrinsic_vaaddu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vaaddu.vx 
v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1396,7 +1396,7 @@ define @intrinsic_vaaddu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vaaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1442,7 +1442,7 @@ define @intrinsic_vaaddu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vaaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1488,7 +1488,7 @@ define @intrinsic_vaaddu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vaaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1534,7 +1534,7 @@ define @intrinsic_vaaddu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vaaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1580,7 +1580,7 @@ define @intrinsic_vaaddu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vaaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1626,7 +1626,7 @@ define @intrinsic_vaaddu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vaaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1672,7 +1672,7 @@ define @intrinsic_vaaddu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vaaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1718,7 +1718,7 @@ define @intrinsic_vaaddu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vaaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1764,7 +1764,7 @@ define @intrinsic_vaaddu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vaaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1810,7 +1810,7 @@ define @intrinsic_vaaddu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vaaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1860,7 +1860,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; 
RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vaaddu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -1868,7 +1868,7 @@ ; ; RV64-LABEL: intrinsic_vaaddu_vx_nxv1i64_nxv1i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vaaddu.vx v8, v8, a0 ; RV64-NEXT: ret entry: @@ -1930,7 +1930,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vaaddu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -1938,7 +1938,7 @@ ; ; RV64-LABEL: intrinsic_vaaddu_vx_nxv2i64_nxv2i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vaaddu.vx v8, v8, a0 ; RV64-NEXT: ret entry: @@ -2000,7 +2000,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vaaddu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -2008,7 +2008,7 @@ ; ; RV64-LABEL: intrinsic_vaaddu_vx_nxv4i64_nxv4i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vaaddu.vx v8, v8, a0 ; RV64-NEXT: ret entry: @@ -2070,7 +2070,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vaaddu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -2078,7 +2078,7 @@ ; ; RV64-LABEL: intrinsic_vaaddu_vx_nxv8i64_nxv8i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vaaddu.vx v8, v8, a0 ; RV64-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll @@ -11,7 +11,7 @@ define @intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -35,7 +35,7 @@ define @intrinsic_vadc_vvm_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -59,7 +59,7 @@ define @intrinsic_vadc_vvm_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -83,7 +83,7 @@ define @intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -107,7 +107,7 @@ define @intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v10, v0 ; CHECK-NEXT: ret entry: @@ -131,7 +131,7 @@ define @intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v12, v0 ; CHECK-NEXT: ret entry: @@ -155,7 +155,7 @@ define @intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v16, v0 ; CHECK-NEXT: ret entry: @@ -179,7 +179,7 @@ define @intrinsic_vadc_vvm_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -203,7 +203,7 @@ define @intrinsic_vadc_vvm_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -227,7 +227,7 @@ define @intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -251,7 +251,7 @@ define @intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v10, v0 ; CHECK-NEXT: ret entry: @@ -275,7 +275,7 @@ define @intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v12, v0 ; CHECK-NEXT: ret entry: @@ -299,7 +299,7 @@ define @intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v16, v0 ; CHECK-NEXT: ret entry: @@ -323,7 +323,7 @@ define @intrinsic_vadc_vvm_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i32_nxv1i32_nxv1i32: 
; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -347,7 +347,7 @@ define @intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -371,7 +371,7 @@ define @intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v10, v0 ; CHECK-NEXT: ret entry: @@ -395,7 +395,7 @@ define @intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v12, v0 ; CHECK-NEXT: ret entry: @@ -419,7 +419,7 @@ define @intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v16, v0 ; CHECK-NEXT: ret entry: @@ -443,7 +443,7 @@ define @intrinsic_vadc_vvm_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -467,7 +467,7 @@ define @intrinsic_vadc_vvm_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v10, v0 ; CHECK-NEXT: ret entry: @@ -491,7 +491,7 @@ define @intrinsic_vadc_vvm_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v12, v0 ; CHECK-NEXT: ret entry: @@ -515,7 +515,7 @@ define @intrinsic_vadc_vvm_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v16, v0 ; CHECK-NEXT: ret entry: @@ -539,7 +539,7 @@ define @intrinsic_vadc_vxm_nxv1i8_nxv1i8_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -563,7 +563,7 @@ define @intrinsic_vadc_vxm_nxv2i8_nxv2i8_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, 
e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -587,7 +587,7 @@ define @intrinsic_vadc_vxm_nxv4i8_nxv4i8_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -611,7 +611,7 @@ define @intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -635,7 +635,7 @@ define @intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -659,7 +659,7 @@ define @intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -683,7 +683,7 @@ define @intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -707,7 +707,7 @@ define @intrinsic_vadc_vxm_nxv1i16_nxv1i16_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -731,7 +731,7 @@ define @intrinsic_vadc_vxm_nxv2i16_nxv2i16_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -755,7 +755,7 @@ define @intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -779,7 +779,7 @@ define @intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -803,7 +803,7 @@ define @intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: 
ret entry: @@ -827,7 +827,7 @@ define @intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -851,7 +851,7 @@ define @intrinsic_vadc_vxm_nxv1i32_nxv1i32_i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -875,7 +875,7 @@ define @intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -899,7 +899,7 @@ define @intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -923,7 +923,7 @@ define @intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -947,7 +947,7 @@ define @intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -975,7 +975,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vadc.vvm v8, v8, v9, v0 ; CHECK-NEXT: addi sp, sp, 16 @@ -1005,7 +1005,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vadc.vvm v8, v8, v10, v0 ; CHECK-NEXT: addi sp, sp, 16 @@ -1035,7 +1035,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vadc.vvm v8, v8, v12, v0 ; CHECK-NEXT: addi sp, sp, 16 @@ -1065,7 +1065,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vadc.vvm v8, v8, v16, v0 ; CHECK-NEXT: addi sp, sp, 16 @@ -1084,7 +1084,7 @@ define @intrinsic_vadc_vim_nxv1i8_nxv1i8_i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, 
mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, -9, v0 ; CHECK-NEXT: ret entry: @@ -1101,7 +1101,7 @@ define @intrinsic_vadc_vim_nxv2i8_nxv2i8_i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1118,7 +1118,7 @@ define @intrinsic_vadc_vim_nxv4i8_nxv4i8_i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, -9, v0 ; CHECK-NEXT: ret entry: @@ -1135,7 +1135,7 @@ define @intrinsic_vadc_vim_nxv8i8_nxv8i8_i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1152,7 +1152,7 @@ define @intrinsic_vadc_vim_nxv16i8_nxv16i8_i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, -9, v0 ; CHECK-NEXT: ret entry: @@ -1169,7 +1169,7 @@ define @intrinsic_vadc_vim_nxv32i8_nxv32i8_i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1186,7 +1186,7 @@ define @intrinsic_vadc_vim_nxv64i8_nxv64i8_i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, -9, v0 ; CHECK-NEXT: ret entry: @@ -1203,7 +1203,7 @@ define @intrinsic_vadc_vim_nxv1i16_nxv1i16_i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1220,7 +1220,7 @@ define @intrinsic_vadc_vim_nxv2i16_nxv2i16_i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, -9, v0 ; CHECK-NEXT: ret entry: @@ -1237,7 +1237,7 @@ define @intrinsic_vadc_vim_nxv4i16_nxv4i16_i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1254,7 +1254,7 @@ define @intrinsic_vadc_vim_nxv8i16_nxv8i16_i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, -9, v0 ; CHECK-NEXT: ret entry: @@ -1271,7 +1271,7 @@ define 
@intrinsic_vadc_vim_nxv16i16_nxv16i16_i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1288,7 +1288,7 @@ define @intrinsic_vadc_vim_nxv32i16_nxv32i16_i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, -9, v0 ; CHECK-NEXT: ret entry: @@ -1305,7 +1305,7 @@ define @intrinsic_vadc_vim_nxv1i32_nxv1i32_i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1322,7 +1322,7 @@ define @intrinsic_vadc_vim_nxv2i32_nxv2i32_i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, -9, v0 ; CHECK-NEXT: ret entry: @@ -1339,7 +1339,7 @@ define @intrinsic_vadc_vim_nxv4i32_nxv4i32_i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1356,7 +1356,7 @@ define @intrinsic_vadc_vim_nxv8i32_nxv8i32_i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, -9, v0 ; CHECK-NEXT: ret entry: @@ -1373,7 +1373,7 @@ define @intrinsic_vadc_vim_nxv16i32_nxv16i32_i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1390,7 +1390,7 @@ define @intrinsic_vadc_vim_nxv1i64_nxv1i64_i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1407,7 +1407,7 @@ define @intrinsic_vadc_vim_nxv2i64_nxv2i64_i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, -9, v0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vadc_vim_nxv4i64_nxv4i64_i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1441,7 +1441,7 @@ define @intrinsic_vadc_vim_nxv8i64_nxv8i64_i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, -9, v0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll @@ -11,7 +11,7 @@ define @intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -35,7 +35,7 @@ define @intrinsic_vadc_vvm_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -59,7 +59,7 @@ define @intrinsic_vadc_vvm_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -83,7 +83,7 @@ define @intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -107,7 +107,7 @@ define @intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v10, v0 ; CHECK-NEXT: ret entry: @@ -131,7 +131,7 @@ define @intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v12, v0 ; CHECK-NEXT: ret entry: @@ -155,7 +155,7 @@ define @intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v16, v0 ; CHECK-NEXT: ret entry: @@ -179,7 +179,7 @@ define @intrinsic_vadc_vvm_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -203,7 +203,7 @@ define @intrinsic_vadc_vvm_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -227,7 +227,7 @@ define @intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16( %0, 
%1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -251,7 +251,7 @@ define @intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v10, v0 ; CHECK-NEXT: ret entry: @@ -275,7 +275,7 @@ define @intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v12, v0 ; CHECK-NEXT: ret entry: @@ -299,7 +299,7 @@ define @intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v16, v0 ; CHECK-NEXT: ret entry: @@ -323,7 +323,7 @@ define @intrinsic_vadc_vvm_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -347,7 +347,7 @@ define @intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -371,7 +371,7 @@ define @intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v10, v0 ; CHECK-NEXT: ret entry: @@ -395,7 +395,7 @@ define @intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v12, v0 ; CHECK-NEXT: ret entry: @@ -419,7 +419,7 @@ define @intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v16, v0 ; CHECK-NEXT: ret entry: @@ -443,7 +443,7 @@ define @intrinsic_vadc_vvm_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -467,7 +467,7 @@ define @intrinsic_vadc_vvm_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, i64 %3) nounwind { ; 
CHECK-LABEL: intrinsic_vadc_vvm_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v10, v0 ; CHECK-NEXT: ret entry: @@ -491,7 +491,7 @@ define @intrinsic_vadc_vvm_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v12, v0 ; CHECK-NEXT: ret entry: @@ -515,7 +515,7 @@ define @intrinsic_vadc_vvm_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vadc.vvm v8, v8, v16, v0 ; CHECK-NEXT: ret entry: @@ -539,7 +539,7 @@ define @intrinsic_vadc_vxm_nxv1i8_nxv1i8_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -563,7 +563,7 @@ define @intrinsic_vadc_vxm_nxv2i8_nxv2i8_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -587,7 +587,7 @@ define @intrinsic_vadc_vxm_nxv4i8_nxv4i8_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -611,7 +611,7 @@ define @intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -635,7 +635,7 @@ define @intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -659,7 +659,7 @@ define @intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -683,7 +683,7 @@ define @intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -707,7 +707,7 @@ define @intrinsic_vadc_vxm_nxv1i16_nxv1i16_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, 
ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -731,7 +731,7 @@ define @intrinsic_vadc_vxm_nxv2i16_nxv2i16_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -755,7 +755,7 @@ define @intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -779,7 +779,7 @@ define @intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -803,7 +803,7 @@ define @intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -827,7 +827,7 @@ define @intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -851,7 +851,7 @@ define @intrinsic_vadc_vxm_nxv1i32_nxv1i32_i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -875,7 +875,7 @@ define @intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -899,7 +899,7 @@ define @intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -923,7 +923,7 @@ define @intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -947,7 +947,7 @@ define @intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: 
vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -971,7 +971,7 @@ define @intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64( %0, i64 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -995,7 +995,7 @@ define @intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64( %0, i64 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -1019,7 +1019,7 @@ define @intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64( %0, i64 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -1043,7 +1043,7 @@ define @intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64( %0, i64 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vadc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -1060,7 +1060,7 @@ define @intrinsic_vadc_vim_nxv1i8_nxv1i8_i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1077,7 +1077,7 @@ define @intrinsic_vadc_vim_nxv2i8_nxv2i8_i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, -9, v0 ; CHECK-NEXT: ret entry: @@ -1094,7 +1094,7 @@ define @intrinsic_vadc_vim_nxv4i8_nxv4i8_i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1111,7 +1111,7 @@ define @intrinsic_vadc_vim_nxv8i8_nxv8i8_i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, -9, v0 ; CHECK-NEXT: ret entry: @@ -1128,7 +1128,7 @@ define @intrinsic_vadc_vim_nxv16i8_nxv16i8_i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1145,7 +1145,7 @@ define @intrinsic_vadc_vim_nxv32i8_nxv32i8_i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, -9, v0 ; CHECK-NEXT: ret entry: @@ -1162,7 +1162,7 @@ define @intrinsic_vadc_vim_nxv64i8_nxv64i8_i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vadc_vim_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1179,7 +1179,7 @@ define @intrinsic_vadc_vim_nxv1i16_nxv1i16_i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, -9, v0 ; CHECK-NEXT: ret entry: @@ -1196,7 +1196,7 @@ define @intrinsic_vadc_vim_nxv2i16_nxv2i16_i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1213,7 +1213,7 @@ define @intrinsic_vadc_vim_nxv4i16_nxv4i16_i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, -9, v0 ; CHECK-NEXT: ret entry: @@ -1230,7 +1230,7 @@ define @intrinsic_vadc_vim_nxv8i16_nxv8i16_i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1247,7 +1247,7 @@ define @intrinsic_vadc_vim_nxv16i16_nxv16i16_i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, -9, v0 ; CHECK-NEXT: ret entry: @@ -1264,7 +1264,7 @@ define @intrinsic_vadc_vim_nxv32i16_nxv32i16_i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1281,7 +1281,7 @@ define @intrinsic_vadc_vim_nxv1i32_nxv1i32_i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, -9, v0 ; CHECK-NEXT: ret entry: @@ -1298,7 +1298,7 @@ define @intrinsic_vadc_vim_nxv2i32_nxv2i32_i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1315,7 +1315,7 @@ define @intrinsic_vadc_vim_nxv4i32_nxv4i32_i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, -9, v0 ; CHECK-NEXT: ret entry: @@ -1332,7 +1332,7 @@ define @intrinsic_vadc_vim_nxv8i32_nxv8i32_i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, 
e32, m4, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1349,7 +1349,7 @@ define @intrinsic_vadc_vim_nxv16i32_nxv16i32_i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, -9, v0 ; CHECK-NEXT: ret entry: @@ -1366,7 +1366,7 @@ define @intrinsic_vadc_vim_nxv1i64_nxv1i64_i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1383,7 +1383,7 @@ define @intrinsic_vadc_vim_nxv2i64_nxv2i64_i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, -9, v0 ; CHECK-NEXT: ret entry: @@ -1400,7 +1400,7 @@ define @intrinsic_vadc_vim_nxv4i64_nxv4i64_i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1417,7 +1417,7 @@ define @intrinsic_vadc_vim_nxv8i64_nxv8i64_i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vadc_vim_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vadc.vim v8, v8, -9, v0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll @@ -5,7 +5,7 @@ define @vadd_vx_nxv1i8( %va, i8 signext %b) { ; CHECK-LABEL: vadd_vx_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -17,7 +17,7 @@ define @vadd_vx_nxv1i8_0( %va) { ; CHECK-LABEL: vadd_vx_nxv1i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %head = insertelement poison, i8 -1, i32 0 @@ -29,7 +29,7 @@ define @vadd_vx_nxv1i8_1( %va) { ; CHECK-LABEL: vadd_vx_nxv1i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 2 ; CHECK-NEXT: ret %head = insertelement poison, i8 2, i32 0 @@ -42,7 +42,7 @@ define @vadd_ii_nxv1i8_1() { ; CHECK-LABEL: vadd_ii_nxv1i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 5 ; CHECK-NEXT: ret %heada = insertelement poison, i8 2, i32 0 @@ -56,7 +56,7 @@ define @vadd_vx_nxv2i8( %va, i8 signext %b) { ; CHECK-LABEL: vadd_vx_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -68,7 +68,7 @@ define @vadd_vx_nxv2i8_0( %va) { ; CHECK-LABEL: vadd_vx_nxv2i8_0: ; CHECK: # 
%bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %head = insertelement poison, i8 -1, i32 0 @@ -80,7 +80,7 @@ define @vadd_vx_nxv2i8_1( %va) { ; CHECK-LABEL: vadd_vx_nxv2i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 2 ; CHECK-NEXT: ret %head = insertelement poison, i8 2, i32 0 @@ -92,7 +92,7 @@ define @vadd_vx_nxv4i8( %va, i8 signext %b) { ; CHECK-LABEL: vadd_vx_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -104,7 +104,7 @@ define @vadd_vx_nxv4i8_0( %va) { ; CHECK-LABEL: vadd_vx_nxv4i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %head = insertelement poison, i8 -1, i32 0 @@ -116,7 +116,7 @@ define @vadd_vx_nxv4i8_1( %va) { ; CHECK-LABEL: vadd_vx_nxv4i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 2 ; CHECK-NEXT: ret %head = insertelement poison, i8 2, i32 0 @@ -128,7 +128,7 @@ define @vadd_vx_nxv8i8( %va, i8 signext %b) { ; CHECK-LABEL: vadd_vx_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -140,7 +140,7 @@ define @vadd_vx_nxv8i8_0( %va) { ; CHECK-LABEL: vadd_vx_nxv8i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %head = insertelement poison, i8 -1, i32 0 @@ -152,7 +152,7 @@ define @vadd_vx_nxv8i8_1( %va) { ; CHECK-LABEL: vadd_vx_nxv8i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 2 ; CHECK-NEXT: ret %head = insertelement poison, i8 2, i32 0 @@ -164,7 +164,7 @@ define @vadd_vx_nxv16i8( %va, i8 signext %b) { ; CHECK-LABEL: vadd_vx_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -176,7 +176,7 @@ define @vadd_vx_nxv16i8_0( %va) { ; CHECK-LABEL: vadd_vx_nxv16i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %head = insertelement poison, i8 -1, i32 0 @@ -188,7 +188,7 @@ define @vadd_vx_nxv16i8_1( %va) { ; CHECK-LABEL: vadd_vx_nxv16i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 2 ; CHECK-NEXT: ret %head = insertelement poison, i8 2, i32 0 @@ -200,7 +200,7 @@ define @vadd_vx_nxv32i8( %va, i8 signext %b) { ; CHECK-LABEL: vadd_vx_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -212,7 +212,7 @@ define @vadd_vx_nxv32i8_0( %va) { ; CHECK-LABEL: 
vadd_vx_nxv32i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %head = insertelement poison, i8 -1, i32 0 @@ -224,7 +224,7 @@ define @vadd_vx_nxv32i8_1( %va) { ; CHECK-LABEL: vadd_vx_nxv32i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 2 ; CHECK-NEXT: ret %head = insertelement poison, i8 2, i32 0 @@ -236,7 +236,7 @@ define @vadd_vx_nxv64i8( %va, i8 signext %b) { ; CHECK-LABEL: vadd_vx_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -248,7 +248,7 @@ define @vadd_vx_nxv64i8_0( %va) { ; CHECK-LABEL: vadd_vx_nxv64i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %head = insertelement poison, i8 -1, i32 0 @@ -260,7 +260,7 @@ define @vadd_vx_nxv64i8_1( %va) { ; CHECK-LABEL: vadd_vx_nxv64i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 2 ; CHECK-NEXT: ret %head = insertelement poison, i8 2, i32 0 @@ -272,7 +272,7 @@ define @vadd_vx_nxv1i16( %va, i16 signext %b) { ; CHECK-LABEL: vadd_vx_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -284,7 +284,7 @@ define @vadd_vx_nxv1i16_0( %va) { ; CHECK-LABEL: vadd_vx_nxv1i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %head = insertelement poison, i16 -1, i32 0 @@ -296,7 +296,7 @@ define @vadd_vx_nxv1i16_1( %va) { ; CHECK-LABEL: vadd_vx_nxv1i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 2 ; CHECK-NEXT: ret %head = insertelement poison, i16 2, i32 0 @@ -308,7 +308,7 @@ define @vadd_vx_nxv2i16( %va, i16 signext %b) { ; CHECK-LABEL: vadd_vx_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -320,7 +320,7 @@ define @vadd_vx_nxv2i16_0( %va) { ; CHECK-LABEL: vadd_vx_nxv2i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %head = insertelement poison, i16 -1, i32 0 @@ -332,7 +332,7 @@ define @vadd_vx_nxv2i16_1( %va) { ; CHECK-LABEL: vadd_vx_nxv2i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 2 ; CHECK-NEXT: ret %head = insertelement poison, i16 2, i32 0 @@ -344,7 +344,7 @@ define @vadd_vx_nxv4i16( %va, i16 signext %b) { ; CHECK-LABEL: vadd_vx_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 
@@ -356,7 +356,7 @@ define @vadd_vx_nxv4i16_0( %va) { ; CHECK-LABEL: vadd_vx_nxv4i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %head = insertelement poison, i16 -1, i32 0 @@ -368,7 +368,7 @@ define @vadd_vx_nxv4i16_1( %va) { ; CHECK-LABEL: vadd_vx_nxv4i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 2 ; CHECK-NEXT: ret %head = insertelement poison, i16 2, i32 0 @@ -380,7 +380,7 @@ define @vadd_vx_nxv8i16( %va, i16 signext %b) { ; CHECK-LABEL: vadd_vx_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -392,7 +392,7 @@ define @vadd_vx_nxv8i16_0( %va) { ; CHECK-LABEL: vadd_vx_nxv8i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %head = insertelement poison, i16 -1, i32 0 @@ -404,7 +404,7 @@ define @vadd_vx_nxv8i16_1( %va) { ; CHECK-LABEL: vadd_vx_nxv8i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 2 ; CHECK-NEXT: ret %head = insertelement poison, i16 2, i32 0 @@ -416,7 +416,7 @@ define @vadd_vx_nxv16i16( %va, i16 signext %b) { ; CHECK-LABEL: vadd_vx_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -428,7 +428,7 @@ define @vadd_vx_nxv16i16_0( %va) { ; CHECK-LABEL: vadd_vx_nxv16i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %head = insertelement poison, i16 -1, i32 0 @@ -440,7 +440,7 @@ define @vadd_vx_nxv16i16_1( %va) { ; CHECK-LABEL: vadd_vx_nxv16i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 2 ; CHECK-NEXT: ret %head = insertelement poison, i16 2, i32 0 @@ -452,7 +452,7 @@ define @vadd_vx_nxv32i16( %va, i16 signext %b) { ; CHECK-LABEL: vadd_vx_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -464,7 +464,7 @@ define @vadd_vx_nxv32i16_0( %va) { ; CHECK-LABEL: vadd_vx_nxv32i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %head = insertelement poison, i16 -1, i32 0 @@ -476,7 +476,7 @@ define @vadd_vx_nxv32i16_1( %va) { ; CHECK-LABEL: vadd_vx_nxv32i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 2 ; CHECK-NEXT: ret %head = insertelement poison, i16 2, i32 0 @@ -488,7 +488,7 @@ define @vadd_vx_nxv1i32( %va, i32 signext %b) { ; CHECK-LABEL: vadd_vx_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; 
CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -500,7 +500,7 @@ define @vadd_vx_nxv1i32_0( %va) { ; CHECK-LABEL: vadd_vx_nxv1i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %head = insertelement poison, i32 -1, i32 0 @@ -512,7 +512,7 @@ define @vadd_vx_nxv1i32_1( %va) { ; CHECK-LABEL: vadd_vx_nxv1i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 2 ; CHECK-NEXT: ret %head = insertelement poison, i32 2, i32 0 @@ -524,7 +524,7 @@ define @vadd_vx_nxv2i32( %va, i32 signext %b) { ; CHECK-LABEL: vadd_vx_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -536,7 +536,7 @@ define @vadd_vx_nxv2i32_0( %va) { ; CHECK-LABEL: vadd_vx_nxv2i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %head = insertelement poison, i32 -1, i32 0 @@ -548,7 +548,7 @@ define @vadd_vx_nxv2i32_1( %va) { ; CHECK-LABEL: vadd_vx_nxv2i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 2 ; CHECK-NEXT: ret %head = insertelement poison, i32 2, i32 0 @@ -560,7 +560,7 @@ define @vadd_vx_nxv4i32( %va, i32 signext %b) { ; CHECK-LABEL: vadd_vx_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -572,7 +572,7 @@ define @vadd_vx_nxv4i32_0( %va) { ; CHECK-LABEL: vadd_vx_nxv4i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %head = insertelement poison, i32 -1, i32 0 @@ -584,7 +584,7 @@ define @vadd_vx_nxv4i32_1( %va) { ; CHECK-LABEL: vadd_vx_nxv4i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 2 ; CHECK-NEXT: ret %head = insertelement poison, i32 2, i32 0 @@ -596,7 +596,7 @@ define @vadd_vx_nxv8i32( %va, i32 signext %b) { ; CHECK-LABEL: vadd_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -608,7 +608,7 @@ define @vadd_vx_nxv8i32_0( %va) { ; CHECK-LABEL: vadd_vx_nxv8i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %head = insertelement poison, i32 -1, i32 0 @@ -620,7 +620,7 @@ define @vadd_vx_nxv8i32_1( %va) { ; CHECK-LABEL: vadd_vx_nxv8i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 2 ; CHECK-NEXT: ret %head = insertelement poison, i32 2, i32 0 @@ -632,7 +632,7 @@ define @vadd_vx_nxv16i32( %va, i32 signext %b) { ; CHECK-LABEL: vadd_vx_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: 
vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -644,7 +644,7 @@ define @vadd_vx_nxv16i32_0( %va) { ; CHECK-LABEL: vadd_vx_nxv16i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %head = insertelement poison, i32 -1, i32 0 @@ -656,7 +656,7 @@ define @vadd_vx_nxv16i32_1( %va) { ; CHECK-LABEL: vadd_vx_nxv16i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 2 ; CHECK-NEXT: ret %head = insertelement poison, i32 2, i32 0 @@ -673,7 +673,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vadd.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -681,7 +681,7 @@ ; ; RV64-LABEL: vadd_vx_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV64-NEXT: vadd.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -693,7 +693,7 @@ define @vadd_vx_nxv1i64_0( %va) { ; CHECK-LABEL: vadd_vx_nxv1i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %head = insertelement poison, i64 -1, i32 0 @@ -705,7 +705,7 @@ define @vadd_vx_nxv1i64_1( %va) { ; CHECK-LABEL: vadd_vx_nxv1i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 2 ; CHECK-NEXT: ret %head = insertelement poison, i64 2, i32 0 @@ -722,7 +722,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vadd.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -730,7 +730,7 @@ ; ; RV64-LABEL: vadd_vx_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV64-NEXT: vadd.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -742,7 +742,7 @@ define @vadd_vx_nxv2i64_0( %va) { ; CHECK-LABEL: vadd_vx_nxv2i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %head = insertelement poison, i64 -1, i32 0 @@ -754,7 +754,7 @@ define @vadd_vx_nxv2i64_1( %va) { ; CHECK-LABEL: vadd_vx_nxv2i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 2 ; CHECK-NEXT: ret %head = insertelement poison, i64 2, i32 0 @@ -771,7 +771,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vadd.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -779,7 +779,7 @@ ; ; RV64-LABEL: vadd_vx_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; 
RV64-NEXT: vadd.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -791,7 +791,7 @@ define @vadd_vx_nxv4i64_0( %va) { ; CHECK-LABEL: vadd_vx_nxv4i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %head = insertelement poison, i64 -1, i32 0 @@ -803,7 +803,7 @@ define @vadd_vx_nxv4i64_1( %va) { ; CHECK-LABEL: vadd_vx_nxv4i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 2 ; CHECK-NEXT: ret %head = insertelement poison, i64 2, i32 0 @@ -820,7 +820,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vadd.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -828,7 +828,7 @@ ; ; RV64-LABEL: vadd_vx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vadd.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -840,7 +840,7 @@ define @vadd_vx_nxv8i64_0( %va) { ; CHECK-LABEL: vadd_vx_nxv8i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %head = insertelement poison, i64 -1, i32 0 @@ -852,7 +852,7 @@ define @vadd_vx_nxv8i64_1( %va) { ; CHECK-LABEL: vadd_vx_nxv8i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 2 ; CHECK-NEXT: ret %head = insertelement poison, i64 2, i32 0 @@ -868,7 +868,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v8, (a0), zero ; RV32-NEXT: sw a3, 12(sp) ; RV32-NEXT: sw a2, 8(sp) @@ -880,7 +880,7 @@ ; RV64-LABEL: vadd_xx_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: add a0, a0, a1 -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vmv.v.x v8, a0 ; RV64-NEXT: ret %head1 = insertelement poison, i64 %a, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll @@ -33,7 +33,7 @@ define @vadd_vv_nxv1i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv1i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -69,7 +69,7 @@ define @vadd_vx_nxv1i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_nxv1i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -95,7 +95,7 @@ define @vadd_vi_nxv1i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_nxv1i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = 
insertelement poison, i8 -1, i32 0 @@ -121,7 +121,7 @@ define @vadd_vv_nxv2i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -145,7 +145,7 @@ define @vadd_vx_nxv2i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -171,7 +171,7 @@ define @vadd_vi_nxv2i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 -1, i32 0 @@ -197,7 +197,7 @@ define @vadd_vv_nxv3i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv3i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -221,7 +221,7 @@ define @vadd_vx_nxv3i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_nxv3i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -247,7 +247,7 @@ define @vadd_vi_nxv3i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_nxv3i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 -1, i32 0 @@ -273,7 +273,7 @@ define @vadd_vv_nxv4i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -297,7 +297,7 @@ define @vadd_vx_nxv4i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_nxv4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -323,7 +323,7 @@ define @vadd_vi_nxv4i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_nxv4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 -1, i32 0 @@ -349,7 +349,7 @@ define @vadd_vv_nxv8i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -373,7 +373,7 @@ define @vadd_vx_nxv8i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_nxv8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, 
a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -399,7 +399,7 @@ define @vadd_vi_nxv8i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_nxv8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 -1, i32 0 @@ -425,7 +425,7 @@ define @vadd_vv_nxv16i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -449,7 +449,7 @@ define @vadd_vx_nxv16i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_nxv16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -475,7 +475,7 @@ define @vadd_vi_nxv16i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_nxv16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 -1, i32 0 @@ -501,7 +501,7 @@ define @vadd_vv_nxv32i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv32i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -525,7 +525,7 @@ define @vadd_vx_nxv32i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_nxv32i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -551,7 +551,7 @@ define @vadd_vi_nxv32i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_nxv32i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 -1, i32 0 @@ -577,7 +577,7 @@ define @vadd_vv_nxv64i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv64i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -601,7 +601,7 @@ define @vadd_vx_nxv64i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_nxv64i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -627,7 +627,7 @@ define @vadd_vi_nxv64i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_nxv64i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 -1, i32 0 @@ -653,7 +653,7 @@ ; CHECK-NEXT: mv a3, 
a2 ; CHECK-NEXT: .LBB50_2: ; CHECK-NEXT: li a4, 0 -; CHECK-NEXT: vsetvli a5, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a5, zero, e8, m8, ta, ma ; CHECK-NEXT: vlm.v v24, (a0) ; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu ; CHECK-NEXT: sub a0, a1, a2 @@ -683,14 +683,14 @@ ; CHECK-NEXT: mv a2, a1 ; CHECK-NEXT: .LBB51_2: ; CHECK-NEXT: li a3, 0 -; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma ; CHECK-NEXT: sub a1, a0, a1 ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: bltu a0, a1, .LBB51_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: mv a3, a1 ; CHECK-NEXT: .LBB51_4: -; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma ; CHECK-NEXT: vadd.vi v16, v16, -1 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 -1, i32 0 @@ -716,7 +716,7 @@ define @vadd_vv_nxv1i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv1i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -740,7 +740,7 @@ define @vadd_vx_nxv1i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_nxv1i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -766,7 +766,7 @@ define @vadd_vi_nxv1i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_nxv1i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 -1, i32 0 @@ -792,7 +792,7 @@ define @vadd_vv_nxv2i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -816,7 +816,7 @@ define @vadd_vx_nxv2i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -842,7 +842,7 @@ define @vadd_vi_nxv2i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 -1, i32 0 @@ -868,7 +868,7 @@ define @vadd_vv_nxv4i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -892,7 +892,7 @@ define @vadd_vx_nxv4i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_nxv4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -918,7 +918,7 @@ define 
@vadd_vi_nxv4i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_nxv4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 -1, i32 0 @@ -944,7 +944,7 @@ define @vadd_vv_nxv8i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -968,7 +968,7 @@ define @vadd_vx_nxv8i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_nxv8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -994,7 +994,7 @@ define @vadd_vi_nxv8i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_nxv8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 -1, i32 0 @@ -1020,7 +1020,7 @@ define @vadd_vv_nxv16i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1044,7 +1044,7 @@ define @vadd_vx_nxv16i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_nxv16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -1070,7 +1070,7 @@ define @vadd_vi_nxv16i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_nxv16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 -1, i32 0 @@ -1096,7 +1096,7 @@ define @vadd_vv_nxv32i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv32i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1120,7 +1120,7 @@ define @vadd_vx_nxv32i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_nxv32i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -1146,7 +1146,7 @@ define @vadd_vi_nxv32i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_nxv32i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 -1, i32 0 @@ -1172,7 +1172,7 @@ define @vadd_vv_nxv1i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv1i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, 
e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1196,7 +1196,7 @@ define @vadd_vx_nxv1i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_nxv1i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1222,7 +1222,7 @@ define @vadd_vi_nxv1i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_nxv1i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 -1, i32 0 @@ -1248,7 +1248,7 @@ define @vadd_vv_nxv2i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1272,7 +1272,7 @@ define @vadd_vx_nxv2i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1298,7 +1298,7 @@ define @vadd_vi_nxv2i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 -1, i32 0 @@ -1324,7 +1324,7 @@ define @vadd_vv_nxv4i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1348,7 +1348,7 @@ define @vadd_vx_nxv4i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_nxv4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1374,7 +1374,7 @@ define @vadd_vi_nxv4i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_nxv4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 -1, i32 0 @@ -1400,7 +1400,7 @@ define @vadd_vv_nxv8i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1424,7 +1424,7 @@ define @vadd_vx_nxv8i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_nxv8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement 
poison, i32 %b, i32 0 @@ -1450,7 +1450,7 @@ define @vadd_vi_nxv8i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_nxv8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 -1, i32 0 @@ -1476,7 +1476,7 @@ define @vadd_vv_nxv16i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1500,7 +1500,7 @@ define @vadd_vx_nxv16i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vx_nxv16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1526,7 +1526,7 @@ define @vadd_vi_nxv16i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_nxv16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 -1, i32 0 @@ -1548,7 +1548,7 @@ ; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a4, a1, 2 -; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma ; CHECK-NEXT: slli a1, a1, 1 ; CHECK-NEXT: sub a3, a0, a1 ; CHECK-NEXT: vslidedown.vx v0, v0, a4 @@ -1583,14 +1583,14 @@ ; CHECK-NEXT: mv a2, a1 ; CHECK-NEXT: .LBB119_2: ; CHECK-NEXT: li a3, 0 -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; CHECK-NEXT: sub a1, a0, a1 ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: bltu a0, a1, .LBB119_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: mv a3, a1 ; CHECK-NEXT: .LBB119_4: -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; CHECK-NEXT: vadd.vi v16, v16, -1 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 -1, i32 0 @@ -1615,7 +1615,7 @@ ; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a4, a0, 2 -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: slli a1, a0, 1 ; CHECK-NEXT: sub a3, a0, a1 ; CHECK-NEXT: vslidedown.vx v0, v0, a4 @@ -1662,7 +1662,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: srli a1, a0, 2 -; RV64-NEXT: vsetvli a2, zero, e8, mf2, ta, mu +; RV64-NEXT: vsetvli a2, zero, e8, mf2, ta, ma ; RV64-NEXT: vslidedown.vx v24, v0, a1 ; RV64-NEXT: slli a0, a0, 1 ; RV64-NEXT: vsetvli zero, a0, e32, m8, ta, mu @@ -1694,7 +1694,7 @@ define @vadd_vv_nxv1i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv1i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1711,7 +1711,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v9, v0.t @@ -1737,16 +1737,16 @@ ; RV32-NEXT: sw a1, 
12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vadd.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vx_nxv1i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vadd.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1772,7 +1772,7 @@ define @vadd_vi_nxv1i64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_nxv1i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 -1, i32 0 @@ -1798,7 +1798,7 @@ define @vadd_vv_nxv2i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1815,7 +1815,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v10, v0.t @@ -1841,16 +1841,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vadd.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vx_nxv2i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vadd.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1876,7 +1876,7 @@ define @vadd_vi_nxv2i64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 -1, i32 0 @@ -1902,7 +1902,7 @@ define @vadd_vv_nxv4i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1919,7 +1919,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v12, v0.t @@ -1945,16 +1945,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, 
e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vadd.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vx_nxv4i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vadd.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1980,7 +1980,7 @@ define @vadd_vi_nxv4i64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_nxv4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 -1, i32 0 @@ -2006,7 +2006,7 @@ define @vadd_vv_nxv8i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vv_nxv8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -2023,7 +2023,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vadd.vv v8, v8, v16, v0.t @@ -2049,16 +2049,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vadd.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vadd_vx_nxv8i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vadd.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -2084,7 +2084,7 @@ define @vadd_vi_nxv8i64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vadd_vi_nxv8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 -1, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd.ll b/llvm/test/CodeGen/RISCV/rvv/vadd.ll --- a/llvm/test/CodeGen/RISCV/rvv/vadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadd.ll @@ -12,7 +12,7 @@ define @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define @intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; 
CHECK-NEXT: ret entry: @@ -150,7 +150,7 @@ define @intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -196,7 +196,7 @@ define @intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -242,7 +242,7 @@ define @intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -288,7 +288,7 @@ define @intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -335,7 +335,7 @@ define @intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -381,7 +381,7 @@ define @intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -427,7 +427,7 @@ define @intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -473,7 +473,7 @@ define @intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -519,7 +519,7 @@ define @intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -565,7 +565,7 @@ define @intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -612,7 +612,7 @@ define @intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -658,7 +658,7 @@ define @intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -704,7 +704,7 @@ define @intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -750,7 +750,7 @@ define @intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -796,7 +796,7 @@ define @intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -843,7 +843,7 @@ define @intrinsic_vadd_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -889,7 +889,7 @@ define @intrinsic_vadd_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -935,7 +935,7 @@ define @intrinsic_vadd_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -981,7 +981,7 @@ define @intrinsic_vadd_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1028,7 +1028,7 @@ define @intrinsic_vadd_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1074,7 +1074,7 @@ define @intrinsic_vadd_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: 
vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1120,7 +1120,7 @@ define @intrinsic_vadd_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1166,7 +1166,7 @@ define @intrinsic_vadd_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1212,7 +1212,7 @@ define @intrinsic_vadd_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1258,7 +1258,7 @@ define @intrinsic_vadd_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1304,7 +1304,7 @@ define @intrinsic_vadd_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1350,7 +1350,7 @@ define @intrinsic_vadd_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1396,7 +1396,7 @@ define @intrinsic_vadd_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1442,7 +1442,7 @@ define @intrinsic_vadd_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1488,7 +1488,7 @@ define @intrinsic_vadd_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1534,7 +1534,7 @@ define @intrinsic_vadd_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1580,7 +1580,7 @@ define @intrinsic_vadd_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) 
nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1626,7 +1626,7 @@ define @intrinsic_vadd_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1672,7 +1672,7 @@ define @intrinsic_vadd_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1718,7 +1718,7 @@ define @intrinsic_vadd_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1764,7 +1764,7 @@ define @intrinsic_vadd_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1810,7 +1810,7 @@ define @intrinsic_vadd_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vadd_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1860,7 +1860,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vadd.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -1868,7 +1868,7 @@ ; ; RV64-LABEL: intrinsic_vadd_vx_nxv1i64_nxv1i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vadd.vx v8, v8, a0 ; RV64-NEXT: ret entry: @@ -1884,14 +1884,14 @@ define @intrinsic_vadd_vx_sext_nxv1i64_nxv1i64_i64( %0, i32 %1, iXLen %2) nounwind { ; RV32-LABEL: intrinsic_vadd_vx_sext_nxv1i64_nxv1i64_i64: ; RV32: # %bb.0: # %entry -; RV32-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV32-NEXT: vadd.vx v8, v8, a0 ; RV32-NEXT: ret ; ; RV64-LABEL: intrinsic_vadd_vx_sext_nxv1i64_nxv1i64_i64: ; RV64: # %bb.0: # %entry ; RV64-NEXT: sext.w a0, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vadd.vx v8, v8, a0 ; RV64-NEXT: ret entry: @@ -1909,7 +1909,7 @@ ; CHECK-LABEL: intrinsic_vadd_vx_sextload_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: lw a0, 0(a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1973,7 +1973,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; 
RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vadd.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -1981,7 +1981,7 @@ ; ; RV64-LABEL: intrinsic_vadd_vx_nxv2i64_nxv2i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vadd.vx v8, v8, a0 ; RV64-NEXT: ret entry: @@ -2043,7 +2043,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vadd.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -2051,7 +2051,7 @@ ; ; RV64-LABEL: intrinsic_vadd_vx_nxv4i64_nxv4i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vadd.vx v8, v8, a0 ; RV64-NEXT: ret entry: @@ -2113,7 +2113,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vadd.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -2121,7 +2121,7 @@ ; ; RV64-LABEL: intrinsic_vadd_vx_nxv8i64_nxv8i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vadd.vx v8, v8, a0 ; RV64-NEXT: ret entry: @@ -2173,7 +2173,7 @@ define @intrinsic_vadd_vi_nxv1i8_nxv1i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vadd_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2206,7 +2206,7 @@ define @intrinsic_vadd_vi_nxv2i8_nxv2i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vadd_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2239,7 +2239,7 @@ define @intrinsic_vadd_vi_nxv4i8_nxv4i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vadd_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2272,7 +2272,7 @@ define @intrinsic_vadd_vi_nxv8i8_nxv8i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vadd_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2305,7 +2305,7 @@ define @intrinsic_vadd_vi_nxv16i8_nxv16i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vadd_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2338,7 +2338,7 @@ define @intrinsic_vadd_vi_nxv32i8_nxv32i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vadd_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2371,7 +2371,7 @@ define @intrinsic_vadd_vi_nxv64i8_nxv64i8_i8( %0, iXLen %1) nounwind { ; 
CHECK-LABEL: intrinsic_vadd_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -9 ; CHECK-NEXT: ret entry: @@ -2404,7 +2404,7 @@ define @intrinsic_vadd_vi_nxv1i16_nxv1i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vadd_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2437,7 +2437,7 @@ define @intrinsic_vadd_vi_nxv2i16_nxv2i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vadd_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2470,7 +2470,7 @@ define @intrinsic_vadd_vi_nxv4i16_nxv4i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vadd_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2503,7 +2503,7 @@ define @intrinsic_vadd_vi_nxv8i16_nxv8i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vadd_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2536,7 +2536,7 @@ define @intrinsic_vadd_vi_nxv16i16_nxv16i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vadd_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2569,7 +2569,7 @@ define @intrinsic_vadd_vi_nxv32i16_nxv32i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vadd_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2602,7 +2602,7 @@ define @intrinsic_vadd_vi_nxv1i32_nxv1i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vadd_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2635,7 +2635,7 @@ define @intrinsic_vadd_vi_nxv2i32_nxv2i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vadd_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2668,7 +2668,7 @@ define @intrinsic_vadd_vi_nxv4i32_nxv4i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vadd_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2701,7 +2701,7 @@ define @intrinsic_vadd_vi_nxv8i32_nxv8i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vadd_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ 
-2734,7 +2734,7 @@ define @intrinsic_vadd_vi_nxv16i32_nxv16i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vadd_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2767,7 +2767,7 @@ define @intrinsic_vadd_vi_nxv1i64_nxv1i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vadd_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2800,7 +2800,7 @@ define @intrinsic_vadd_vi_nxv2i64_nxv2i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vadd_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2833,7 +2833,7 @@ define @intrinsic_vadd_vi_nxv4i64_nxv4i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vadd_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2866,7 +2866,7 @@ define @intrinsic_vadd_vi_nxv8i64_nxv8i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vadd_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vand_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vand_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vand_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vand_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define 
@intrinsic_vand_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vand_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vand_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vand_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vand_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vand_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vand_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vand_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vand_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vand_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vand_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vand_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vand_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vand_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vand_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vand_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vand_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vand_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1095,7 +1095,7 @@ define @intrinsic_vand_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vand_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vand_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 
+1236,7 @@ define @intrinsic_vand_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define @intrinsic_vand_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vand_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vand_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vand_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vand_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define @intrinsic_vand_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vand_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vand_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vand_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vand_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ define @intrinsic_vand_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ define @intrinsic_vand_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vand_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1898,7 +1898,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1957,7 +1957,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vand.vv v8, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -2016,7 +2016,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vand.vv v8, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -2075,7 +2075,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vand.vv v8, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 @@ -2124,7 +2124,7 @@ define @intrinsic_vand_vi_nxv1i8_nxv1i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2157,7 +2157,7 @@ define @intrinsic_vand_vi_nxv2i8_nxv2i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2190,7 +2190,7 @@ define @intrinsic_vand_vi_nxv4i8_nxv4i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2223,7 +2223,7 @@ define @intrinsic_vand_vi_nxv8i8_nxv8i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, 
m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2256,7 +2256,7 @@ define @intrinsic_vand_vi_nxv16i8_nxv16i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2289,7 +2289,7 @@ define @intrinsic_vand_vi_nxv32i8_nxv32i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2322,7 +2322,7 @@ define @intrinsic_vand_vi_nxv64i8_nxv64i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2355,7 +2355,7 @@ define @intrinsic_vand_vi_nxv1i16_nxv1i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2388,7 +2388,7 @@ define @intrinsic_vand_vi_nxv2i16_nxv2i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2421,7 +2421,7 @@ define @intrinsic_vand_vi_nxv4i16_nxv4i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2454,7 +2454,7 @@ define @intrinsic_vand_vi_nxv8i16_nxv8i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2487,7 +2487,7 @@ define @intrinsic_vand_vi_nxv16i16_nxv16i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2520,7 +2520,7 @@ define @intrinsic_vand_vi_nxv32i16_nxv32i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2553,7 +2553,7 @@ define @intrinsic_vand_vi_nxv1i32_nxv1i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2586,7 +2586,7 @@ define @intrinsic_vand_vi_nxv2i32_nxv2i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv2i32_nxv2i32_i32: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2619,7 +2619,7 @@ define @intrinsic_vand_vi_nxv4i32_nxv4i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2652,7 +2652,7 @@ define @intrinsic_vand_vi_nxv8i32_nxv8i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2685,7 +2685,7 @@ define @intrinsic_vand_vi_nxv16i32_nxv16i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2718,7 +2718,7 @@ define @intrinsic_vand_vi_nxv1i64_nxv1i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2751,7 +2751,7 @@ define @intrinsic_vand_vi_nxv2i64_nxv2i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2784,7 +2784,7 @@ define @intrinsic_vand_vi_nxv4i64_nxv4i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2817,7 +2817,7 @@ define @intrinsic_vand_vi_nxv8i64_nxv8i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vand_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vand_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; 
CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vand_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vand_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vand_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vand_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vand_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vand_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vand_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vand_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vand_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vand_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define 
@intrinsic_vand_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vand_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vand_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vand_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vand_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vand_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vand_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vand_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vand_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vand_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1095,7 +1095,7 @@ define @intrinsic_vand_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vand_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vand_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vand_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define @intrinsic_vand_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vand_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vand_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vand_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vand_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define @intrinsic_vand_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vand_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define 
@intrinsic_vand_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vand_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vand_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ define @intrinsic_vand_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ define @intrinsic_vand_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vand_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1894,7 +1894,7 @@ define @intrinsic_vand_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1941,7 +1941,7 @@ define @intrinsic_vand_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1988,7 +1988,7 @@ define @intrinsic_vand_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2035,7 +2035,7 @@ define @intrinsic_vand_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vand_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2076,7 +2076,7 @@ define @intrinsic_vand_vi_nxv1i8_nxv1i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, 
mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2109,7 +2109,7 @@ define @intrinsic_vand_vi_nxv2i8_nxv2i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2142,7 +2142,7 @@ define @intrinsic_vand_vi_nxv4i8_nxv4i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2175,7 +2175,7 @@ define @intrinsic_vand_vi_nxv8i8_nxv8i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2208,7 +2208,7 @@ define @intrinsic_vand_vi_nxv16i8_nxv16i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2241,7 +2241,7 @@ define @intrinsic_vand_vi_nxv32i8_nxv32i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2274,7 +2274,7 @@ define @intrinsic_vand_vi_nxv64i8_nxv64i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2307,7 +2307,7 @@ define @intrinsic_vand_vi_nxv1i16_nxv1i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2340,7 +2340,7 @@ define @intrinsic_vand_vi_nxv2i16_nxv2i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2373,7 +2373,7 @@ define @intrinsic_vand_vi_nxv4i16_nxv4i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2406,7 +2406,7 @@ define @intrinsic_vand_vi_nxv8i16_nxv8i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2439,7 +2439,7 @@ define @intrinsic_vand_vi_nxv16i16_nxv16i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2472,7 +2472,7 @@ define @intrinsic_vand_vi_nxv32i16_nxv32i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2505,7 +2505,7 @@ define @intrinsic_vand_vi_nxv1i32_nxv1i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2538,7 +2538,7 @@ define @intrinsic_vand_vi_nxv2i32_nxv2i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2571,7 +2571,7 @@ define @intrinsic_vand_vi_nxv4i32_nxv4i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2604,7 +2604,7 @@ define @intrinsic_vand_vi_nxv8i32_nxv8i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2637,7 +2637,7 @@ define @intrinsic_vand_vi_nxv16i32_nxv16i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2670,7 +2670,7 @@ define @intrinsic_vand_vi_nxv1i64_nxv1i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2703,7 +2703,7 @@ define @intrinsic_vand_vi_nxv2i64_nxv2i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2736,7 +2736,7 @@ define @intrinsic_vand_vi_nxv4i64_nxv4i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2769,7 +2769,7 @@ define @intrinsic_vand_vi_nxv8i64_nxv8i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vand_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vand-sdnode.ll @@ -5,7 +5,7 @@ define @vand_vv_nxv1i8( %va, %vb) { ; CHECK-LABEL: vand_vv_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = and %va, %vb @@ -15,7 +15,7 @@ define @vand_vx_nxv1i8( %va, i8 signext %b) { ; CHECK-LABEL: vand_vx_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -27,7 +27,7 @@ define @vand_vi_nxv1i8_0( %va) { ; CHECK-LABEL: vand_vi_nxv1i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, -10 ; CHECK-NEXT: ret %head = insertelement poison, i8 -10, i32 0 @@ -39,7 +39,7 @@ define @vand_vi_nxv1i8_1( %va) { ; CHECK-LABEL: vand_vi_nxv1i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i8 8, i32 0 @@ -52,7 +52,7 @@ ; CHECK-LABEL: vand_vi_nxv1i8_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 16, i32 0 @@ -64,7 +64,7 @@ define @vand_vv_nxv2i8( %va, %vb) { ; CHECK-LABEL: vand_vv_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = and %va, %vb @@ -74,7 +74,7 @@ define @vand_vx_nxv2i8( %va, i8 signext %b) { ; CHECK-LABEL: vand_vx_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -86,7 +86,7 @@ define @vand_vi_nxv2i8_0( %va) { ; CHECK-LABEL: vand_vi_nxv2i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, -10 ; CHECK-NEXT: ret %head = insertelement poison, i8 -10, i32 0 @@ -98,7 +98,7 @@ define @vand_vi_nxv2i8_1( %va) { ; CHECK-LABEL: vand_vi_nxv2i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i8 8, i32 0 @@ -111,7 +111,7 @@ ; CHECK-LABEL: vand_vi_nxv2i8_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 16, i32 0 @@ -123,7 +123,7 @@ define @vand_vv_nxv4i8( %va, %vb) { ; CHECK-LABEL: vand_vv_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = and %va, %vb @@ -133,7 +133,7 @@ define @vand_vx_nxv4i8( %va, i8 signext %b) { ; CHECK-LABEL: vand_vx_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement 
poison, i8 %b, i32 0 @@ -145,7 +145,7 @@ define @vand_vi_nxv4i8_0( %va) { ; CHECK-LABEL: vand_vi_nxv4i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, -10 ; CHECK-NEXT: ret %head = insertelement poison, i8 -10, i32 0 @@ -157,7 +157,7 @@ define @vand_vi_nxv4i8_1( %va) { ; CHECK-LABEL: vand_vi_nxv4i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i8 8, i32 0 @@ -170,7 +170,7 @@ ; CHECK-LABEL: vand_vi_nxv4i8_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 16, i32 0 @@ -182,7 +182,7 @@ define @vand_vv_nxv8i8( %va, %vb) { ; CHECK-LABEL: vand_vv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = and %va, %vb @@ -192,7 +192,7 @@ define @vand_vx_nxv8i8( %va, i8 signext %b) { ; CHECK-LABEL: vand_vx_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -204,7 +204,7 @@ define @vand_vi_nxv8i8_0( %va) { ; CHECK-LABEL: vand_vi_nxv8i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, -10 ; CHECK-NEXT: ret %head = insertelement poison, i8 -10, i32 0 @@ -216,7 +216,7 @@ define @vand_vi_nxv8i8_1( %va) { ; CHECK-LABEL: vand_vi_nxv8i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i8 8, i32 0 @@ -229,7 +229,7 @@ ; CHECK-LABEL: vand_vi_nxv8i8_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 16, i32 0 @@ -241,7 +241,7 @@ define @vand_vv_nxv16i8( %va, %vb) { ; CHECK-LABEL: vand_vv_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = and %va, %vb @@ -251,7 +251,7 @@ define @vand_vx_nxv16i8( %va, i8 signext %b) { ; CHECK-LABEL: vand_vx_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -263,7 +263,7 @@ define @vand_vi_nxv16i8_0( %va) { ; CHECK-LABEL: vand_vi_nxv16i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, -10 ; CHECK-NEXT: ret %head = insertelement poison, i8 -10, i32 0 @@ -275,7 +275,7 @@ define @vand_vi_nxv16i8_1( %va) { ; CHECK-LABEL: vand_vi_nxv16i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i8 8, i32 0 @@ -288,7 +288,7 @@ ; 
CHECK-LABEL: vand_vi_nxv16i8_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 16, i32 0 @@ -300,7 +300,7 @@ define @vand_vv_nxv32i8( %va, %vb) { ; CHECK-LABEL: vand_vv_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = and %va, %vb @@ -310,7 +310,7 @@ define @vand_vx_nxv32i8( %va, i8 signext %b) { ; CHECK-LABEL: vand_vx_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -322,7 +322,7 @@ define @vand_vi_nxv32i8_0( %va) { ; CHECK-LABEL: vand_vi_nxv32i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, -10 ; CHECK-NEXT: ret %head = insertelement poison, i8 -10, i32 0 @@ -334,7 +334,7 @@ define @vand_vi_nxv32i8_1( %va) { ; CHECK-LABEL: vand_vi_nxv32i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i8 8, i32 0 @@ -347,7 +347,7 @@ ; CHECK-LABEL: vand_vi_nxv32i8_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 16, i32 0 @@ -359,7 +359,7 @@ define @vand_vv_nxv64i8( %va, %vb) { ; CHECK-LABEL: vand_vv_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = and %va, %vb @@ -369,7 +369,7 @@ define @vand_vx_nxv64i8( %va, i8 signext %b) { ; CHECK-LABEL: vand_vx_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -381,7 +381,7 @@ define @vand_vi_nxv64i8_0( %va) { ; CHECK-LABEL: vand_vi_nxv64i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, -10 ; CHECK-NEXT: ret %head = insertelement poison, i8 -10, i32 0 @@ -393,7 +393,7 @@ define @vand_vi_nxv64i8_1( %va) { ; CHECK-LABEL: vand_vi_nxv64i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i8 8, i32 0 @@ -406,7 +406,7 @@ ; CHECK-LABEL: vand_vi_nxv64i8_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 16, i32 0 @@ -418,7 +418,7 @@ define @vand_vv_nxv1i16( %va, %vb) { ; CHECK-LABEL: vand_vv_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = and %va, %vb @@ -428,7 +428,7 @@ define @vand_vx_nxv1i16( %va, i16 signext %b) { ; CHECK-LABEL: 
vand_vx_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -440,7 +440,7 @@ define @vand_vi_nxv1i16_0( %va) { ; CHECK-LABEL: vand_vi_nxv1i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, -10 ; CHECK-NEXT: ret %head = insertelement poison, i16 -10, i32 0 @@ -452,7 +452,7 @@ define @vand_vi_nxv1i16_1( %va) { ; CHECK-LABEL: vand_vi_nxv1i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i16 8, i32 0 @@ -465,7 +465,7 @@ ; CHECK-LABEL: vand_vi_nxv1i16_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 16, i32 0 @@ -477,7 +477,7 @@ define @vand_vv_nxv2i16( %va, %vb) { ; CHECK-LABEL: vand_vv_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = and %va, %vb @@ -487,7 +487,7 @@ define @vand_vx_nxv2i16( %va, i16 signext %b) { ; CHECK-LABEL: vand_vx_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -499,7 +499,7 @@ define @vand_vi_nxv2i16_0( %va) { ; CHECK-LABEL: vand_vi_nxv2i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, -10 ; CHECK-NEXT: ret %head = insertelement poison, i16 -10, i32 0 @@ -511,7 +511,7 @@ define @vand_vi_nxv2i16_1( %va) { ; CHECK-LABEL: vand_vi_nxv2i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i16 8, i32 0 @@ -524,7 +524,7 @@ ; CHECK-LABEL: vand_vi_nxv2i16_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 16, i32 0 @@ -536,7 +536,7 @@ define @vand_vv_nxv4i16( %va, %vb) { ; CHECK-LABEL: vand_vv_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = and %va, %vb @@ -546,7 +546,7 @@ define @vand_vx_nxv4i16( %va, i16 signext %b) { ; CHECK-LABEL: vand_vx_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -558,7 +558,7 @@ define @vand_vi_nxv4i16_0( %va) { ; CHECK-LABEL: vand_vi_nxv4i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, -10 ; CHECK-NEXT: ret %head = insertelement poison, i16 -10, i32 0 @@ -570,7 +570,7 @@ define @vand_vi_nxv4i16_1( %va) { ; 
CHECK-LABEL: vand_vi_nxv4i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i16 8, i32 0 @@ -583,7 +583,7 @@ ; CHECK-LABEL: vand_vi_nxv4i16_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 16, i32 0 @@ -595,7 +595,7 @@ define @vand_vv_nxv8i16( %va, %vb) { ; CHECK-LABEL: vand_vv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = and %va, %vb @@ -605,7 +605,7 @@ define @vand_vx_nxv8i16( %va, i16 signext %b) { ; CHECK-LABEL: vand_vx_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -617,7 +617,7 @@ define @vand_vi_nxv8i16_0( %va) { ; CHECK-LABEL: vand_vi_nxv8i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, -10 ; CHECK-NEXT: ret %head = insertelement poison, i16 -10, i32 0 @@ -629,7 +629,7 @@ define @vand_vi_nxv8i16_1( %va) { ; CHECK-LABEL: vand_vi_nxv8i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i16 8, i32 0 @@ -642,7 +642,7 @@ ; CHECK-LABEL: vand_vi_nxv8i16_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 16, i32 0 @@ -654,7 +654,7 @@ define @vand_vv_nxv16i16( %va, %vb) { ; CHECK-LABEL: vand_vv_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = and %va, %vb @@ -664,7 +664,7 @@ define @vand_vx_nxv16i16( %va, i16 signext %b) { ; CHECK-LABEL: vand_vx_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -676,7 +676,7 @@ define @vand_vi_nxv16i16_0( %va) { ; CHECK-LABEL: vand_vi_nxv16i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, -10 ; CHECK-NEXT: ret %head = insertelement poison, i16 -10, i32 0 @@ -688,7 +688,7 @@ define @vand_vi_nxv16i16_1( %va) { ; CHECK-LABEL: vand_vi_nxv16i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i16 8, i32 0 @@ -701,7 +701,7 @@ ; CHECK-LABEL: vand_vi_nxv16i16_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 16, i32 0 @@ -713,7 +713,7 @@ define @vand_vv_nxv32i16( %va, %vb) { ; 
CHECK-LABEL: vand_vv_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = and %va, %vb @@ -723,7 +723,7 @@ define @vand_vx_nxv32i16( %va, i16 signext %b) { ; CHECK-LABEL: vand_vx_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -735,7 +735,7 @@ define @vand_vi_nxv32i16_0( %va) { ; CHECK-LABEL: vand_vi_nxv32i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, -10 ; CHECK-NEXT: ret %head = insertelement poison, i16 -10, i32 0 @@ -747,7 +747,7 @@ define @vand_vi_nxv32i16_1( %va) { ; CHECK-LABEL: vand_vi_nxv32i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i16 8, i32 0 @@ -760,7 +760,7 @@ ; CHECK-LABEL: vand_vi_nxv32i16_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 16, i32 0 @@ -772,7 +772,7 @@ define @vand_vv_nxv1i32( %va, %vb) { ; CHECK-LABEL: vand_vv_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = and %va, %vb @@ -782,7 +782,7 @@ define @vand_vx_nxv1i32( %va, i32 signext %b) { ; CHECK-LABEL: vand_vx_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -794,7 +794,7 @@ define @vand_vi_nxv1i32_0( %va) { ; CHECK-LABEL: vand_vi_nxv1i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, -10 ; CHECK-NEXT: ret %head = insertelement poison, i32 -10, i32 0 @@ -806,7 +806,7 @@ define @vand_vi_nxv1i32_1( %va) { ; CHECK-LABEL: vand_vi_nxv1i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i32 8, i32 0 @@ -819,7 +819,7 @@ ; CHECK-LABEL: vand_vi_nxv1i32_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 16, i32 0 @@ -831,7 +831,7 @@ define @vand_vv_nxv2i32( %va, %vb) { ; CHECK-LABEL: vand_vv_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = and %va, %vb @@ -841,7 +841,7 @@ define @vand_vx_nxv2i32( %va, i32 signext %b) { ; CHECK-LABEL: vand_vx_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -853,7 +853,7 @@ define @vand_vi_nxv2i32_0( %va) { ; 
CHECK-LABEL: vand_vi_nxv2i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, -10 ; CHECK-NEXT: ret %head = insertelement poison, i32 -10, i32 0 @@ -865,7 +865,7 @@ define @vand_vi_nxv2i32_1( %va) { ; CHECK-LABEL: vand_vi_nxv2i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i32 8, i32 0 @@ -878,7 +878,7 @@ ; CHECK-LABEL: vand_vi_nxv2i32_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 16, i32 0 @@ -890,7 +890,7 @@ define @vand_vv_nxv4i32( %va, %vb) { ; CHECK-LABEL: vand_vv_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = and %va, %vb @@ -900,7 +900,7 @@ define @vand_vx_nxv4i32( %va, i32 signext %b) { ; CHECK-LABEL: vand_vx_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -912,7 +912,7 @@ define @vand_vi_nxv4i32_0( %va) { ; CHECK-LABEL: vand_vi_nxv4i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, -10 ; CHECK-NEXT: ret %head = insertelement poison, i32 -10, i32 0 @@ -924,7 +924,7 @@ define @vand_vi_nxv4i32_1( %va) { ; CHECK-LABEL: vand_vi_nxv4i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i32 8, i32 0 @@ -937,7 +937,7 @@ ; CHECK-LABEL: vand_vi_nxv4i32_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 16, i32 0 @@ -949,7 +949,7 @@ define @vand_vv_nxv8i32( %va, %vb) { ; CHECK-LABEL: vand_vv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = and %va, %vb @@ -959,7 +959,7 @@ define @vand_vx_nxv8i32( %va, i32 signext %b) { ; CHECK-LABEL: vand_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -971,7 +971,7 @@ define @vand_vi_nxv8i32_0( %va) { ; CHECK-LABEL: vand_vi_nxv8i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, -10 ; CHECK-NEXT: ret %head = insertelement poison, i32 -10, i32 0 @@ -983,7 +983,7 @@ define @vand_vi_nxv8i32_1( %va) { ; CHECK-LABEL: vand_vi_nxv8i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i32 8, i32 0 @@ -996,7 +996,7 @@ ; CHECK-LABEL: vand_vi_nxv8i32_2: ; CHECK: # 
%bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 16, i32 0 @@ -1008,7 +1008,7 @@ define @vand_vv_nxv16i32( %va, %vb) { ; CHECK-LABEL: vand_vv_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = and %va, %vb @@ -1018,7 +1018,7 @@ define @vand_vx_nxv16i32( %va, i32 signext %b) { ; CHECK-LABEL: vand_vx_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -1030,7 +1030,7 @@ define @vand_vi_nxv16i32_0( %va) { ; CHECK-LABEL: vand_vi_nxv16i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, -10 ; CHECK-NEXT: ret %head = insertelement poison, i32 -10, i32 0 @@ -1042,7 +1042,7 @@ define @vand_vi_nxv16i32_1( %va) { ; CHECK-LABEL: vand_vi_nxv16i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i32 8, i32 0 @@ -1055,7 +1055,7 @@ ; CHECK-LABEL: vand_vi_nxv16i32_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 16, i32 0 @@ -1067,7 +1067,7 @@ define @vand_vv_nxv1i64( %va, %vb) { ; CHECK-LABEL: vand_vv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = and %va, %vb @@ -1082,7 +1082,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vand.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -1090,7 +1090,7 @@ ; ; RV64-LABEL: vand_vx_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV64-NEXT: vand.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -1102,7 +1102,7 @@ define @vand_vi_nxv1i64_0( %va) { ; CHECK-LABEL: vand_vi_nxv1i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, -10 ; CHECK-NEXT: ret %head = insertelement poison, i64 -10, i32 0 @@ -1114,7 +1114,7 @@ define @vand_vi_nxv1i64_1( %va) { ; CHECK-LABEL: vand_vi_nxv1i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i64 8, i32 0 @@ -1127,7 +1127,7 @@ ; CHECK-LABEL: vand_vi_nxv1i64_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 16, i32 0 @@ -1139,7 +1139,7 @@ define @vand_vv_nxv2i64( %va, %vb) { ; CHECK-LABEL: vand_vv_nxv2i64: ; CHECK: # %bb.0: 
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = and %va, %vb @@ -1154,7 +1154,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vand.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -1162,7 +1162,7 @@ ; ; RV64-LABEL: vand_vx_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV64-NEXT: vand.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -1174,7 +1174,7 @@ define @vand_vi_nxv2i64_0( %va) { ; CHECK-LABEL: vand_vi_nxv2i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, -10 ; CHECK-NEXT: ret %head = insertelement poison, i64 -10, i32 0 @@ -1186,7 +1186,7 @@ define @vand_vi_nxv2i64_1( %va) { ; CHECK-LABEL: vand_vi_nxv2i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i64 8, i32 0 @@ -1199,7 +1199,7 @@ ; CHECK-LABEL: vand_vi_nxv2i64_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 16, i32 0 @@ -1211,7 +1211,7 @@ define @vand_vv_nxv4i64( %va, %vb) { ; CHECK-LABEL: vand_vv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = and %va, %vb @@ -1226,7 +1226,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vand.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -1234,7 +1234,7 @@ ; ; RV64-LABEL: vand_vx_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV64-NEXT: vand.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -1246,7 +1246,7 @@ define @vand_vi_nxv4i64_0( %va) { ; CHECK-LABEL: vand_vi_nxv4i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, -10 ; CHECK-NEXT: ret %head = insertelement poison, i64 -10, i32 0 @@ -1258,7 +1258,7 @@ define @vand_vi_nxv4i64_1( %va) { ; CHECK-LABEL: vand_vi_nxv4i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i64 8, i32 0 @@ -1271,7 +1271,7 @@ ; CHECK-LABEL: vand_vi_nxv4i64_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 16, i32 0 @@ -1283,7 +1283,7 @@ define @vand_vv_nxv8i64( %va, %vb) { ; CHECK-LABEL: vand_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, 
ta, ma ; CHECK-NEXT: vand.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = and %va, %vb @@ -1298,7 +1298,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vand.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -1306,7 +1306,7 @@ ; ; RV64-LABEL: vand_vx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vand.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -1318,7 +1318,7 @@ define @vand_vi_nxv8i64_0( %va) { ; CHECK-LABEL: vand_vi_nxv8i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, -10 ; CHECK-NEXT: ret %head = insertelement poison, i64 -10, i32 0 @@ -1330,7 +1330,7 @@ define @vand_vi_nxv8i64_1( %va) { ; CHECK-LABEL: vand_vi_nxv8i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i64 8, i32 0 @@ -1343,7 +1343,7 @@ ; CHECK-LABEL: vand_vi_nxv8i64_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 16, i32 0 @@ -1359,7 +1359,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v8, (a0), zero ; RV32-NEXT: sw a3, 12(sp) ; RV32-NEXT: sw a2, 8(sp) @@ -1371,7 +1371,7 @@ ; RV64-LABEL: vand_xx_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: and a0, a0, a1 -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vmv.v.x v8, a0 ; RV64-NEXT: ret %head1 = insertelement poison, i64 %a, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll @@ -33,7 +33,7 @@ define @vand_vv_nxv1i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv1i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -57,7 +57,7 @@ define @vand_vx_nxv1i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_nxv1i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -83,7 +83,7 @@ define @vand_vi_nxv1i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_nxv1i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 4, i32 0 @@ -109,7 +109,7 @@ define @vand_vv_nxv2i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 
; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -133,7 +133,7 @@ define @vand_vx_nxv2i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -159,7 +159,7 @@ define @vand_vi_nxv2i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 4, i32 0 @@ -185,7 +185,7 @@ define @vand_vv_nxv4i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -209,7 +209,7 @@ define @vand_vx_nxv4i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_nxv4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -235,7 +235,7 @@ define @vand_vi_nxv4i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_nxv4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 4, i32 0 @@ -261,7 +261,7 @@ define @vand_vv_nxv8i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -285,7 +285,7 @@ define @vand_vx_nxv8i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_nxv8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -311,7 +311,7 @@ define @vand_vi_nxv8i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_nxv8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 4, i32 0 @@ -337,7 +337,7 @@ define @vand_vv_nxv16i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -361,7 +361,7 @@ define @vand_vx_nxv16i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_nxv16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -387,7 +387,7 @@ define @vand_vi_nxv16i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_nxv16i8_unmasked: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 4, i32 0 @@ -413,7 +413,7 @@ define @vand_vv_nxv32i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv32i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -437,7 +437,7 @@ define @vand_vx_nxv32i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_nxv32i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -463,7 +463,7 @@ define @vand_vi_nxv32i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_nxv32i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 4, i32 0 @@ -489,7 +489,7 @@ define @vand_vv_nxv64i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv64i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -513,7 +513,7 @@ define @vand_vx_nxv64i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_nxv64i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -539,7 +539,7 @@ define @vand_vi_nxv64i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_nxv64i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 4, i32 0 @@ -565,7 +565,7 @@ define @vand_vv_nxv1i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv1i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -589,7 +589,7 @@ define @vand_vx_nxv1i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_nxv1i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -615,7 +615,7 @@ define @vand_vi_nxv1i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_nxv1i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 4, i32 0 @@ -641,7 +641,7 @@ define @vand_vv_nxv2i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 
@@ -665,7 +665,7 @@ define @vand_vx_nxv2i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -691,7 +691,7 @@ define @vand_vi_nxv2i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 4, i32 0 @@ -717,7 +717,7 @@ define @vand_vv_nxv4i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -741,7 +741,7 @@ define @vand_vx_nxv4i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_nxv4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -767,7 +767,7 @@ define @vand_vi_nxv4i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_nxv4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 4, i32 0 @@ -793,7 +793,7 @@ define @vand_vv_nxv8i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -817,7 +817,7 @@ define @vand_vx_nxv8i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_nxv8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -843,7 +843,7 @@ define @vand_vi_nxv8i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_nxv8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 4, i32 0 @@ -869,7 +869,7 @@ define @vand_vv_nxv14i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv14i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -893,7 +893,7 @@ define @vand_vx_nxv14i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_nxv14i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -919,7 +919,7 @@ define @vand_vi_nxv14i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_nxv14i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli 
zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 4, i32 0 @@ -945,7 +945,7 @@ define @vand_vv_nxv16i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -969,7 +969,7 @@ define @vand_vx_nxv16i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_nxv16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -995,7 +995,7 @@ define @vand_vi_nxv16i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_nxv16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 4, i32 0 @@ -1021,7 +1021,7 @@ define @vand_vv_nxv32i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv32i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1057,7 +1057,7 @@ define @vand_vx_nxv32i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_nxv32i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -1083,7 +1083,7 @@ define @vand_vi_nxv32i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_nxv32i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 4, i32 0 @@ -1109,7 +1109,7 @@ define @vand_vv_nxv1i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv1i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1133,7 +1133,7 @@ define @vand_vx_nxv1i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_nxv1i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1159,7 +1159,7 @@ define @vand_vi_nxv1i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_nxv1i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 4, i32 0 @@ -1185,7 +1185,7 @@ define @vand_vv_nxv2i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement 
poison, i1 true, i32 0 @@ -1209,7 +1209,7 @@ define @vand_vx_nxv2i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1235,7 +1235,7 @@ define @vand_vi_nxv2i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 4, i32 0 @@ -1261,7 +1261,7 @@ define @vand_vv_nxv4i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1285,7 +1285,7 @@ define @vand_vx_nxv4i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_nxv4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1311,7 +1311,7 @@ define @vand_vi_nxv4i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_nxv4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 4, i32 0 @@ -1337,7 +1337,7 @@ define @vand_vv_nxv8i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1361,7 +1361,7 @@ define @vand_vx_nxv8i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_nxv8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1387,7 +1387,7 @@ define @vand_vi_nxv8i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_nxv8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 4, i32 0 @@ -1413,7 +1413,7 @@ define @vand_vv_nxv16i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1437,7 +1437,7 @@ define @vand_vx_nxv16i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vx_nxv16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1463,7 +1463,7 @@ define @vand_vi_nxv16i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_nxv16i32_unmasked: ; 
CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 4, i32 0 @@ -1489,7 +1489,7 @@ define @vand_vv_nxv1i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv1i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1506,7 +1506,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vand.vv v8, v8, v9, v0.t @@ -1532,16 +1532,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vand.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_nxv1i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vand.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1567,7 +1567,7 @@ define @vand_vi_nxv1i64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_nxv1i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 4, i32 0 @@ -1593,7 +1593,7 @@ define @vand_vv_nxv2i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1610,7 +1610,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vand.vv v8, v8, v10, v0.t @@ -1636,16 +1636,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vand.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_nxv2i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vand.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1671,7 +1671,7 @@ define @vand_vi_nxv2i64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 4, i32 0 @@ -1697,7 +1697,7 @@ 
define @vand_vv_nxv4i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1714,7 +1714,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vand.vv v8, v8, v12, v0.t @@ -1740,16 +1740,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vand.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_nxv4i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vand.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1775,7 +1775,7 @@ define @vand_vi_nxv4i64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_nxv4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 4, i32 0 @@ -1801,7 +1801,7 @@ define @vand_vv_nxv8i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vand_vv_nxv8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vand.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1818,7 +1818,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vand.vv v8, v8, v16, v0.t @@ -1844,16 +1844,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vand.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vand_vx_nxv8i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vand.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1879,7 +1879,7 @@ define @vand_vi_nxv8i64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vand_vi_nxv8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 4, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vasub.ll b/llvm/test/CodeGen/RISCV/rvv/vasub.ll --- a/llvm/test/CodeGen/RISCV/rvv/vasub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vasub.ll @@ -12,7 +12,7 @@ define @intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, 
iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vasub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define @intrinsic_vasub_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vasub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vasub_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vasub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -150,7 +150,7 @@ define @intrinsic_vasub_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vasub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -196,7 +196,7 @@ define @intrinsic_vasub_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vasub.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -242,7 +242,7 @@ define @intrinsic_vasub_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vasub.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -288,7 +288,7 @@ define @intrinsic_vasub_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vasub.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -335,7 +335,7 @@ define @intrinsic_vasub_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vasub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -381,7 +381,7 @@ define @intrinsic_vasub_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vasub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -427,7 +427,7 @@ define @intrinsic_vasub_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vasub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -473,7 +473,7 @@ define @intrinsic_vasub_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vasub.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -519,7 +519,7 @@ define @intrinsic_vasub_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vasub.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -565,7 +565,7 @@ define @intrinsic_vasub_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vasub.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -612,7 +612,7 @@ define @intrinsic_vasub_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vasub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -658,7 +658,7 @@ define @intrinsic_vasub_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vasub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -704,7 +704,7 @@ define @intrinsic_vasub_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vasub.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -750,7 +750,7 @@ define @intrinsic_vasub_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vasub.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -796,7 +796,7 @@ define @intrinsic_vasub_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vasub.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -843,7 +843,7 @@ define @intrinsic_vasub_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vasub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -889,7 +889,7 @@ define @intrinsic_vasub_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vasub.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -935,7 +935,7 @@ define @intrinsic_vasub_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: 
vasub.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -981,7 +981,7 @@ define @intrinsic_vasub_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vasub.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1028,7 +1028,7 @@ define @intrinsic_vasub_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vasub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1074,7 +1074,7 @@ define @intrinsic_vasub_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vasub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1120,7 +1120,7 @@ define @intrinsic_vasub_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vasub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1166,7 +1166,7 @@ define @intrinsic_vasub_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vasub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1212,7 +1212,7 @@ define @intrinsic_vasub_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vasub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1258,7 +1258,7 @@ define @intrinsic_vasub_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vasub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1304,7 +1304,7 @@ define @intrinsic_vasub_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vasub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1350,7 +1350,7 @@ define @intrinsic_vasub_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vasub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1396,7 +1396,7 @@ define @intrinsic_vasub_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vasub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1442,7 +1442,7 @@ define @intrinsic_vasub_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vasub_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vasub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1488,7 +1488,7 @@ define @intrinsic_vasub_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vasub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1534,7 +1534,7 @@ define @intrinsic_vasub_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vasub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1580,7 +1580,7 @@ define @intrinsic_vasub_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vasub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1626,7 +1626,7 @@ define @intrinsic_vasub_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vasub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1672,7 +1672,7 @@ define @intrinsic_vasub_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vasub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1718,7 +1718,7 @@ define @intrinsic_vasub_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vasub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1764,7 +1764,7 @@ define @intrinsic_vasub_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vasub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1810,7 +1810,7 @@ define @intrinsic_vasub_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasub_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vasub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1860,7 +1860,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vasub.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -1868,7 +1868,7 @@ ; ; RV64-LABEL: intrinsic_vasub_vx_nxv1i64_nxv1i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vasub.vx v8, v8, a0 ; RV64-NEXT: ret entry: @@ -1930,7 +1930,7 @@ ; 
RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vasub.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -1938,7 +1938,7 @@ ; ; RV64-LABEL: intrinsic_vasub_vx_nxv2i64_nxv2i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vasub.vx v8, v8, a0 ; RV64-NEXT: ret entry: @@ -2000,7 +2000,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vasub.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -2008,7 +2008,7 @@ ; ; RV64-LABEL: intrinsic_vasub_vx_nxv4i64_nxv4i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vasub.vx v8, v8, a0 ; RV64-NEXT: ret entry: @@ -2070,7 +2070,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vasub.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -2078,7 +2078,7 @@ ; ; RV64-LABEL: intrinsic_vasub_vx_nxv8i64_nxv8i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vasub.vx v8, v8, a0 ; RV64-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vasubu.ll b/llvm/test/CodeGen/RISCV/rvv/vasubu.ll --- a/llvm/test/CodeGen/RISCV/rvv/vasubu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vasubu.ll @@ -12,7 +12,7 @@ define @intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vasubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define @intrinsic_vasubu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vasubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vasubu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vasubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -150,7 +150,7 @@ define @intrinsic_vasubu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vasubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -196,7 +196,7 @@ define @intrinsic_vasubu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vasubu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -242,7 +242,7 @@ 
define @intrinsic_vasubu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vasubu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -288,7 +288,7 @@ define @intrinsic_vasubu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vasubu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -335,7 +335,7 @@ define @intrinsic_vasubu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vasubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -381,7 +381,7 @@ define @intrinsic_vasubu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vasubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -427,7 +427,7 @@ define @intrinsic_vasubu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vasubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -473,7 +473,7 @@ define @intrinsic_vasubu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vasubu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -519,7 +519,7 @@ define @intrinsic_vasubu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vasubu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -565,7 +565,7 @@ define @intrinsic_vasubu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vasubu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -612,7 +612,7 @@ define @intrinsic_vasubu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vasubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -658,7 +658,7 @@ define @intrinsic_vasubu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vasubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -704,7 +704,7 @@ define @intrinsic_vasubu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, 
iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vasubu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -750,7 +750,7 @@ define @intrinsic_vasubu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vasubu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -796,7 +796,7 @@ define @intrinsic_vasubu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vasubu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -843,7 +843,7 @@ define @intrinsic_vasubu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vasubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -889,7 +889,7 @@ define @intrinsic_vasubu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vasubu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -935,7 +935,7 @@ define @intrinsic_vasubu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vasubu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -981,7 +981,7 @@ define @intrinsic_vasubu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vasubu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1028,7 +1028,7 @@ define @intrinsic_vasubu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vasubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1074,7 +1074,7 @@ define @intrinsic_vasubu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vasubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1120,7 +1120,7 @@ define @intrinsic_vasubu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vasubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1166,7 +1166,7 @@ define @intrinsic_vasubu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vasubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1212,7 +1212,7 @@ define @intrinsic_vasubu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vasubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1258,7 +1258,7 @@ define @intrinsic_vasubu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vasubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1304,7 +1304,7 @@ define @intrinsic_vasubu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vasubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1350,7 +1350,7 @@ define @intrinsic_vasubu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vasubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1396,7 +1396,7 @@ define @intrinsic_vasubu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vasubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1442,7 +1442,7 @@ define @intrinsic_vasubu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vasubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1488,7 +1488,7 @@ define @intrinsic_vasubu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vasubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1534,7 +1534,7 @@ define @intrinsic_vasubu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vasubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1580,7 +1580,7 @@ define @intrinsic_vasubu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vasubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1626,7 +1626,7 @@ define @intrinsic_vasubu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; 
CHECK-NEXT: vasubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1672,7 +1672,7 @@ define @intrinsic_vasubu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vasubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1718,7 +1718,7 @@ define @intrinsic_vasubu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vasubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1764,7 +1764,7 @@ define @intrinsic_vasubu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vasubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1810,7 +1810,7 @@ define @intrinsic_vasubu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vasubu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vasubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1860,7 +1860,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vasubu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -1868,7 +1868,7 @@ ; ; RV64-LABEL: intrinsic_vasubu_vx_nxv1i64_nxv1i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vasubu.vx v8, v8, a0 ; RV64-NEXT: ret entry: @@ -1930,7 +1930,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vasubu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -1938,7 +1938,7 @@ ; ; RV64-LABEL: intrinsic_vasubu_vx_nxv2i64_nxv2i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vasubu.vx v8, v8, a0 ; RV64-NEXT: ret entry: @@ -2000,7 +2000,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vasubu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -2008,7 +2008,7 @@ ; ; RV64-LABEL: intrinsic_vasubu_vx_nxv4i64_nxv4i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vasubu.vx v8, v8, a0 ; RV64-NEXT: ret entry: @@ -2070,7 +2070,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vasubu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -2078,7 +2078,7 @@ ; ; RV64-LABEL: intrinsic_vasubu_vx_nxv8i64_nxv8i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, 
a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vasubu.vx v8, v8, a0 ; RV64-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vcompress.ll b/llvm/test/CodeGen/RISCV/rvv/vcompress.ll --- a/llvm/test/CodeGen/RISCV/rvv/vcompress.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vcompress.ll @@ -12,7 +12,7 @@ define @intrinsic_vcompress_vm_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vcompress.vm v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -34,7 +34,7 @@ define @intrinsic_vcompress_vm_nxv2i8_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vcompress.vm v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -56,7 +56,7 @@ define @intrinsic_vcompress_vm_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vcompress.vm v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -78,7 +78,7 @@ define @intrinsic_vcompress_vm_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vcompress.vm v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -100,7 +100,7 @@ define @intrinsic_vcompress_vm_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vcompress.vm v8, v10, v0 ; CHECK-NEXT: ret entry: @@ -122,7 +122,7 @@ define @intrinsic_vcompress_vm_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vcompress.vm v8, v12, v0 ; CHECK-NEXT: ret entry: @@ -144,7 +144,7 @@ define @intrinsic_vcompress_vm_nxv64i8_nxv64i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma ; CHECK-NEXT: vcompress.vm v8, v16, v0 ; CHECK-NEXT: ret entry: @@ -166,7 +166,7 @@ define @intrinsic_vcompress_vm_nxv1i16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vcompress.vm v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -188,7 +188,7 @@ define @intrinsic_vcompress_vm_nxv2i16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vcompress.vm v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -210,7 +210,7 @@ define @intrinsic_vcompress_vm_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv4i16_nxv4i16: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vcompress.vm v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -232,7 +232,7 @@ define @intrinsic_vcompress_vm_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vcompress.vm v8, v10, v0 ; CHECK-NEXT: ret entry: @@ -254,7 +254,7 @@ define @intrinsic_vcompress_vm_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vcompress.vm v8, v12, v0 ; CHECK-NEXT: ret entry: @@ -276,7 +276,7 @@ define @intrinsic_vcompress_vm_nxv32i16_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vcompress.vm v8, v16, v0 ; CHECK-NEXT: ret entry: @@ -298,7 +298,7 @@ define @intrinsic_vcompress_vm_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vcompress.vm v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -320,7 +320,7 @@ define @intrinsic_vcompress_vm_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vcompress.vm v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -342,7 +342,7 @@ define @intrinsic_vcompress_vm_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vcompress.vm v8, v10, v0 ; CHECK-NEXT: ret entry: @@ -364,7 +364,7 @@ define @intrinsic_vcompress_vm_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vcompress.vm v8, v12, v0 ; CHECK-NEXT: ret entry: @@ -386,7 +386,7 @@ define @intrinsic_vcompress_vm_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vcompress.vm v8, v16, v0 ; CHECK-NEXT: ret entry: @@ -408,7 +408,7 @@ define @intrinsic_vcompress_vm_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vcompress.vm v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -430,7 +430,7 @@ define @intrinsic_vcompress_vm_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, 
a0, e64, m2, tu, ma ; CHECK-NEXT: vcompress.vm v8, v10, v0 ; CHECK-NEXT: ret entry: @@ -452,7 +452,7 @@ define @intrinsic_vcompress_vm_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vcompress.vm v8, v12, v0 ; CHECK-NEXT: ret entry: @@ -474,7 +474,7 @@ define @intrinsic_vcompress_vm_nxv8i64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vcompress.vm v8, v16, v0 ; CHECK-NEXT: ret entry: @@ -496,7 +496,7 @@ define @intrinsic_vcompress_vm_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vcompress.vm v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -518,7 +518,7 @@ define @intrinsic_vcompress_vm_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vcompress.vm v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -540,7 +540,7 @@ define @intrinsic_vcompress_vm_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vcompress.vm v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -562,7 +562,7 @@ define @intrinsic_vcompress_vm_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vcompress.vm v8, v10, v0 ; CHECK-NEXT: ret entry: @@ -584,7 +584,7 @@ define @intrinsic_vcompress_vm_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vcompress.vm v8, v12, v0 ; CHECK-NEXT: ret entry: @@ -606,7 +606,7 @@ define @intrinsic_vcompress_vm_nxv32f16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vcompress.vm v8, v16, v0 ; CHECK-NEXT: ret entry: @@ -628,7 +628,7 @@ define @intrinsic_vcompress_vm_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vcompress.vm v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -650,7 +650,7 @@ define @intrinsic_vcompress_vm_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vcompress.vm v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -672,7 
+672,7 @@ define @intrinsic_vcompress_vm_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vcompress.vm v8, v10, v0 ; CHECK-NEXT: ret entry: @@ -694,7 +694,7 @@ define @intrinsic_vcompress_vm_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vcompress.vm v8, v12, v0 ; CHECK-NEXT: ret entry: @@ -716,7 +716,7 @@ define @intrinsic_vcompress_vm_nxv16f32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vcompress.vm v8, v16, v0 ; CHECK-NEXT: ret entry: @@ -738,7 +738,7 @@ define @intrinsic_vcompress_vm_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vcompress.vm v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -760,7 +760,7 @@ define @intrinsic_vcompress_vm_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vcompress.vm v8, v10, v0 ; CHECK-NEXT: ret entry: @@ -782,7 +782,7 @@ define @intrinsic_vcompress_vm_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vcompress.vm v8, v12, v0 ; CHECK-NEXT: ret entry: @@ -804,7 +804,7 @@ define @intrinsic_vcompress_vm_nxv8f64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vcompress_vm_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vcompress.vm v8, v16, v0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll @@ -19,7 +19,7 @@ define @vfsgnj_vv_nxv1f16_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -43,7 +43,7 @@ define @vfsgnj_vv_nxv2f16_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -67,7 +67,7 @@ define @vfsgnj_vv_nxv4f16_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: 
vfsgnj.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -91,7 +91,7 @@ define @vfsgnj_vv_nxv8f16_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -115,7 +115,7 @@ define @vfsgnj_vv_nxv16f16_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -139,7 +139,7 @@ define @vfsgnj_vv_nxv32f16_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_nxv32f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -163,7 +163,7 @@ define @vfsgnj_vv_nxv1f32_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -187,7 +187,7 @@ define @vfsgnj_vv_nxv2f32_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -211,7 +211,7 @@ define @vfsgnj_vv_nxv4f32_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -235,7 +235,7 @@ define @vfsgnj_vv_nxv8f32_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -259,7 +259,7 @@ define @vfsgnj_vv_nxv16f32_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_nxv16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -283,7 +283,7 @@ define @vfsgnj_vv_nxv1f64_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -307,7 +307,7 @@ define @vfsgnj_vv_nxv2f64_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -331,7 +331,7 @@ define 
@vfsgnj_vv_nxv4f64_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -355,7 +355,7 @@ define @vfsgnj_vv_nxv8f64_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfsgnj_vv_nxv8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vcpop.ll b/llvm/test/CodeGen/RISCV/rvv/vcpop.ll --- a/llvm/test/CodeGen/RISCV/rvv/vcpop.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vcpop.ll @@ -10,7 +10,7 @@ define iXLen @intrinsic_vcpop_m_nxv1i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpop_m_nxv1i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: ret entry: @@ -43,7 +43,7 @@ ; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv1i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a0, v9, v0.t ; CHECK-NEXT: ret @@ -77,7 +77,7 @@ define iXLen @intrinsic_vcpop_m_nxv2i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpop_m_nxv2i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: ret entry: @@ -97,7 +97,7 @@ ; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv2i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a0, v9, v0.t ; CHECK-NEXT: ret @@ -117,7 +117,7 @@ define iXLen @intrinsic_vcpop_m_nxv4i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpop_m_nxv4i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: ret entry: @@ -137,7 +137,7 @@ ; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv4i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a0, v9, v0.t ; CHECK-NEXT: ret @@ -157,7 +157,7 @@ define iXLen @intrinsic_vcpop_m_nxv8i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpop_m_nxv8i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: ret entry: @@ -177,7 +177,7 @@ ; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv8i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a0, v9, v0.t ; CHECK-NEXT: ret @@ -197,7 +197,7 @@ define iXLen @intrinsic_vcpop_m_nxv16i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpop_m_nxv16i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: ret 
entry: @@ -217,7 +217,7 @@ ; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv16i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a0, v9, v0.t ; CHECK-NEXT: ret @@ -237,7 +237,7 @@ define iXLen @intrinsic_vcpop_m_nxv32i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpop_m_nxv32i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: ret entry: @@ -257,7 +257,7 @@ ; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv32i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a0, v9, v0.t ; CHECK-NEXT: ret @@ -277,7 +277,7 @@ define iXLen @intrinsic_vcpop_m_nxv64i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vcpop_m_nxv64i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: ret entry: @@ -297,7 +297,7 @@ ; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv64i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a0, v9, v0.t ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vdiv_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vdiv_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vdiv_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vdiv_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vdiv_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vdiv_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vdiv_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vdiv_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vdiv_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vdiv_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vdiv_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vdiv_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vdiv_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vdiv_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vdiv_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v10 ; 
CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vdiv_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vdiv_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vdiv_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vdiv_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vdiv_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vdiv_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vdiv_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1095,7 +1095,7 @@ define @intrinsic_vdiv_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vdiv_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vdiv_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vdiv_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv16i8_nxv16i8_i8: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define @intrinsic_vdiv_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vdiv_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vdiv_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vdiv_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vdiv_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define @intrinsic_vdiv_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vdiv_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vdiv_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vdiv_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vdiv_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 
+1753,7 @@ define @intrinsic_vdiv_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ define @intrinsic_vdiv_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vdiv_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1898,7 +1898,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1957,7 +1957,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vdiv.vv v8, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -2016,7 +2016,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vdiv.vv v8, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -2075,7 +2075,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vdiv.vv v8, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vdiv_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vdiv_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vdiv_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vdiv_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vdiv_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vdiv_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vdiv_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vdiv_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vdiv_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vdiv_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vdiv_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vdiv_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vdiv_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vdiv_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, 
e32, mf2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vdiv_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vdiv_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vdiv_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vdiv_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vdiv_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vdiv_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vdiv_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vdiv_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vdiv_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1095,7 +1095,7 @@ define @intrinsic_vdiv_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vdiv_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind 
{ ; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vdiv_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vdiv_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define @intrinsic_vdiv_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vdiv_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vdiv_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vdiv_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vdiv_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define @intrinsic_vdiv_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vdiv_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vdiv_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: 
vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vdiv_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vdiv_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ define @intrinsic_vdiv_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ define @intrinsic_vdiv_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vdiv_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1894,7 +1894,7 @@ define @intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1941,7 +1941,7 @@ define @intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1988,7 +1988,7 @@ define @intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2035,7 +2035,7 @@ define @intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdiv_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll @@ -7,7 +7,7 @@ define @vdiv_vv_nxv1i8( %va, %vb) { ; CHECK-LABEL: vdiv_vv_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = sdiv %va, %vb @@ 
-17,7 +17,7 @@ define @vdiv_vx_nxv1i8( %va, i8 signext %b) { ; CHECK-LABEL: vdiv_vx_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -30,7 +30,7 @@ ; CHECK-LABEL: vdiv_vi_nxv1i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 109 -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmulh.vx v9, v8, a0 ; CHECK-NEXT: vsub.vv v8, v9, v8 ; CHECK-NEXT: vsra.vi v8, v8, 2 @@ -58,7 +58,7 @@ define @vdiv_iv_nxv1i8_0( %va) { ; CHECK-LABEL: vdiv_iv_nxv1i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: ret %head = insertelement poison, i8 0, i32 0 @@ -70,7 +70,7 @@ define @vdiv_vv_nxv2i8( %va, %vb) { ; CHECK-LABEL: vdiv_vv_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = sdiv %va, %vb @@ -80,7 +80,7 @@ define @vdiv_vx_nxv2i8( %va, i8 signext %b) { ; CHECK-LABEL: vdiv_vx_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -93,7 +93,7 @@ ; CHECK-LABEL: vdiv_vi_nxv2i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 109 -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmulh.vx v9, v8, a0 ; CHECK-NEXT: vsub.vv v8, v9, v8 ; CHECK-NEXT: vsra.vi v8, v8, 2 @@ -109,7 +109,7 @@ define @vdiv_vv_nxv4i8( %va, %vb) { ; CHECK-LABEL: vdiv_vv_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = sdiv %va, %vb @@ -119,7 +119,7 @@ define @vdiv_vx_nxv4i8( %va, i8 signext %b) { ; CHECK-LABEL: vdiv_vx_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -132,7 +132,7 @@ ; CHECK-LABEL: vdiv_vi_nxv4i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 109 -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmulh.vx v9, v8, a0 ; CHECK-NEXT: vsub.vv v8, v9, v8 ; CHECK-NEXT: vsra.vi v8, v8, 2 @@ -148,7 +148,7 @@ define @vdiv_vv_nxv8i8( %va, %vb) { ; CHECK-LABEL: vdiv_vv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = sdiv %va, %vb @@ -158,7 +158,7 @@ define @vdiv_vx_nxv8i8( %va, i8 signext %b) { ; CHECK-LABEL: vdiv_vx_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -171,7 +171,7 @@ ; CHECK-LABEL: vdiv_vi_nxv8i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 109 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmulh.vx v9, v8, a0 ; CHECK-NEXT: vsub.vv v8, v9, v8 ; CHECK-NEXT: vsra.vi v8, v8, 2 @@ -187,7 +187,7 @@ define @vdiv_vv_nxv16i8( %va, %vb) { ; 
CHECK-LABEL: vdiv_vv_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = sdiv %va, %vb @@ -197,7 +197,7 @@ define @vdiv_vx_nxv16i8( %va, i8 signext %b) { ; CHECK-LABEL: vdiv_vx_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -210,7 +210,7 @@ ; CHECK-LABEL: vdiv_vi_nxv16i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 109 -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vmulh.vx v10, v8, a0 ; CHECK-NEXT: vsub.vv v8, v10, v8 ; CHECK-NEXT: vsra.vi v8, v8, 2 @@ -226,7 +226,7 @@ define @vdiv_vv_nxv32i8( %va, %vb) { ; CHECK-LABEL: vdiv_vv_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = sdiv %va, %vb @@ -236,7 +236,7 @@ define @vdiv_vx_nxv32i8( %va, i8 signext %b) { ; CHECK-LABEL: vdiv_vx_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -249,7 +249,7 @@ ; CHECK-LABEL: vdiv_vi_nxv32i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 109 -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vmulh.vx v12, v8, a0 ; CHECK-NEXT: vsub.vv v8, v12, v8 ; CHECK-NEXT: vsra.vi v8, v8, 2 @@ -265,7 +265,7 @@ define @vdiv_vv_nxv64i8( %va, %vb) { ; CHECK-LABEL: vdiv_vv_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = sdiv %va, %vb @@ -275,7 +275,7 @@ define @vdiv_vx_nxv64i8( %va, i8 signext %b) { ; CHECK-LABEL: vdiv_vx_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -288,7 +288,7 @@ ; CHECK-LABEL: vdiv_vi_nxv64i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 109 -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vmulh.vx v16, v8, a0 ; CHECK-NEXT: vsub.vv v8, v16, v8 ; CHECK-NEXT: vsra.vi v8, v8, 2 @@ -304,7 +304,7 @@ define @vdiv_vv_nxv1i16( %va, %vb) { ; CHECK-LABEL: vdiv_vv_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = sdiv %va, %vb @@ -314,7 +314,7 @@ define @vdiv_vx_nxv1i16( %va, i16 signext %b) { ; CHECK-LABEL: vdiv_vx_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -328,7 +328,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 1048571 ; RV32-NEXT: addi a0, a0, 1755 -; RV32-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; RV32-NEXT: vmulh.vx v8, v8, a0 ; RV32-NEXT: vsra.vi v8, v8, 1 ; RV32-NEXT: vsrl.vi v9, v8, 15 @@ -339,7 +339,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 1048571 ; RV64-NEXT: addiw a0, a0, 1755 -; 
RV64-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; RV64-NEXT: vmulh.vx v8, v8, a0 ; RV64-NEXT: vsra.vi v8, v8, 1 ; RV64-NEXT: vsrl.vi v9, v8, 15 @@ -354,7 +354,7 @@ define @vdiv_vv_nxv2i16( %va, %vb) { ; CHECK-LABEL: vdiv_vv_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = sdiv %va, %vb @@ -364,7 +364,7 @@ define @vdiv_vx_nxv2i16( %va, i16 signext %b) { ; CHECK-LABEL: vdiv_vx_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -378,7 +378,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 1048571 ; RV32-NEXT: addi a0, a0, 1755 -; RV32-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; RV32-NEXT: vmulh.vx v8, v8, a0 ; RV32-NEXT: vsra.vi v8, v8, 1 ; RV32-NEXT: vsrl.vi v9, v8, 15 @@ -389,7 +389,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 1048571 ; RV64-NEXT: addiw a0, a0, 1755 -; RV64-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; RV64-NEXT: vmulh.vx v8, v8, a0 ; RV64-NEXT: vsra.vi v8, v8, 1 ; RV64-NEXT: vsrl.vi v9, v8, 15 @@ -404,7 +404,7 @@ define @vdiv_vv_nxv4i16( %va, %vb) { ; CHECK-LABEL: vdiv_vv_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = sdiv %va, %vb @@ -414,7 +414,7 @@ define @vdiv_vx_nxv4i16( %va, i16 signext %b) { ; CHECK-LABEL: vdiv_vx_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -428,7 +428,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 1048571 ; RV32-NEXT: addi a0, a0, 1755 -; RV32-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; RV32-NEXT: vmulh.vx v8, v8, a0 ; RV32-NEXT: vsra.vi v8, v8, 1 ; RV32-NEXT: vsrl.vi v9, v8, 15 @@ -439,7 +439,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 1048571 ; RV64-NEXT: addiw a0, a0, 1755 -; RV64-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; RV64-NEXT: vmulh.vx v8, v8, a0 ; RV64-NEXT: vsra.vi v8, v8, 1 ; RV64-NEXT: vsrl.vi v9, v8, 15 @@ -454,7 +454,7 @@ define @vdiv_vv_nxv8i16( %va, %vb) { ; CHECK-LABEL: vdiv_vv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = sdiv %va, %vb @@ -464,7 +464,7 @@ define @vdiv_vx_nxv8i16( %va, i16 signext %b) { ; CHECK-LABEL: vdiv_vx_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -478,7 +478,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 1048571 ; RV32-NEXT: addi a0, a0, 1755 -; RV32-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV32-NEXT: vmulh.vx v8, v8, a0 ; RV32-NEXT: vsra.vi v8, v8, 1 ; RV32-NEXT: vsrl.vi v10, v8, 15 @@ -489,7 +489,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 1048571 ; RV64-NEXT: addiw a0, a0, 1755 -; RV64-NEXT: vsetvli a1, zero, e16, m2, ta, mu 
+; RV64-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV64-NEXT: vmulh.vx v8, v8, a0 ; RV64-NEXT: vsra.vi v8, v8, 1 ; RV64-NEXT: vsrl.vi v10, v8, 15 @@ -504,7 +504,7 @@ define @vdiv_vv_nxv16i16( %va, %vb) { ; CHECK-LABEL: vdiv_vv_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = sdiv %va, %vb @@ -514,7 +514,7 @@ define @vdiv_vx_nxv16i16( %va, i16 signext %b) { ; CHECK-LABEL: vdiv_vx_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -528,7 +528,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 1048571 ; RV32-NEXT: addi a0, a0, 1755 -; RV32-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; RV32-NEXT: vmulh.vx v8, v8, a0 ; RV32-NEXT: vsra.vi v8, v8, 1 ; RV32-NEXT: vsrl.vi v12, v8, 15 @@ -539,7 +539,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 1048571 ; RV64-NEXT: addiw a0, a0, 1755 -; RV64-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; RV64-NEXT: vmulh.vx v8, v8, a0 ; RV64-NEXT: vsra.vi v8, v8, 1 ; RV64-NEXT: vsrl.vi v12, v8, 15 @@ -554,7 +554,7 @@ define @vdiv_vv_nxv32i16( %va, %vb) { ; CHECK-LABEL: vdiv_vv_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = sdiv %va, %vb @@ -564,7 +564,7 @@ define @vdiv_vx_nxv32i16( %va, i16 signext %b) { ; CHECK-LABEL: vdiv_vx_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -578,7 +578,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 1048571 ; RV32-NEXT: addi a0, a0, 1755 -; RV32-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; RV32-NEXT: vmulh.vx v8, v8, a0 ; RV32-NEXT: vsra.vi v8, v8, 1 ; RV32-NEXT: vsrl.vi v16, v8, 15 @@ -589,7 +589,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 1048571 ; RV64-NEXT: addiw a0, a0, 1755 -; RV64-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; RV64-NEXT: vmulh.vx v8, v8, a0 ; RV64-NEXT: vsra.vi v8, v8, 1 ; RV64-NEXT: vsrl.vi v16, v8, 15 @@ -604,7 +604,7 @@ define @vdiv_vv_nxv1i32( %va, %vb) { ; CHECK-LABEL: vdiv_vv_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = sdiv %va, %vb @@ -614,7 +614,7 @@ define @vdiv_vx_nxv1i32( %va, i32 signext %b) { ; CHECK-LABEL: vdiv_vx_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -628,7 +628,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 449390 ; RV32-NEXT: addi a0, a0, -1171 -; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; RV32-NEXT: vmulh.vx v9, v8, a0 ; RV32-NEXT: vsub.vv v8, v9, v8 ; RV32-NEXT: vsrl.vi v9, v8, 31 @@ -640,7 +640,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 449390 ; RV64-NEXT: addiw a0, a0, -1171 -; RV64-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; RV64-NEXT: vsetvli a1, zero, 
e32, mf2, ta, ma ; RV64-NEXT: vmulh.vx v9, v8, a0 ; RV64-NEXT: vsub.vv v8, v9, v8 ; RV64-NEXT: vsra.vi v8, v8, 2 @@ -656,7 +656,7 @@ define @vdiv_vv_nxv2i32( %va, %vb) { ; CHECK-LABEL: vdiv_vv_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = sdiv %va, %vb @@ -666,7 +666,7 @@ define @vdiv_vx_nxv2i32( %va, i32 signext %b) { ; CHECK-LABEL: vdiv_vx_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -680,7 +680,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 449390 ; RV32-NEXT: addi a0, a0, -1171 -; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; RV32-NEXT: vmulh.vx v9, v8, a0 ; RV32-NEXT: vsub.vv v8, v9, v8 ; RV32-NEXT: vsrl.vi v9, v8, 31 @@ -692,7 +692,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 449390 ; RV64-NEXT: addiw a0, a0, -1171 -; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; RV64-NEXT: vmulh.vx v9, v8, a0 ; RV64-NEXT: vsub.vv v8, v9, v8 ; RV64-NEXT: vsra.vi v8, v8, 2 @@ -708,7 +708,7 @@ define @vdiv_vv_nxv4i32( %va, %vb) { ; CHECK-LABEL: vdiv_vv_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = sdiv %va, %vb @@ -718,7 +718,7 @@ define @vdiv_vx_nxv4i32( %va, i32 signext %b) { ; CHECK-LABEL: vdiv_vx_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -732,7 +732,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 449390 ; RV32-NEXT: addi a0, a0, -1171 -; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; RV32-NEXT: vmulh.vx v10, v8, a0 ; RV32-NEXT: vsub.vv v8, v10, v8 ; RV32-NEXT: vsrl.vi v10, v8, 31 @@ -744,7 +744,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 449390 ; RV64-NEXT: addiw a0, a0, -1171 -; RV64-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; RV64-NEXT: vmulh.vx v10, v8, a0 ; RV64-NEXT: vsub.vv v8, v10, v8 ; RV64-NEXT: vsra.vi v8, v8, 2 @@ -760,7 +760,7 @@ define @vdiv_vv_nxv8i32( %va, %vb) { ; CHECK-LABEL: vdiv_vv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = sdiv %va, %vb @@ -770,7 +770,7 @@ define @vdiv_vx_nxv8i32( %va, i32 signext %b) { ; CHECK-LABEL: vdiv_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -784,7 +784,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 449390 ; RV32-NEXT: addi a0, a0, -1171 -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vmulh.vx v12, v8, a0 ; RV32-NEXT: vsub.vv v8, v12, v8 ; RV32-NEXT: vsrl.vi v12, v8, 31 @@ -796,7 +796,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 449390 ; RV64-NEXT: addiw a0, a0, -1171 -; RV64-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV64-NEXT: vmulh.vx v12, 
v8, a0 ; RV64-NEXT: vsub.vv v8, v12, v8 ; RV64-NEXT: vsra.vi v8, v8, 2 @@ -812,7 +812,7 @@ define @vdiv_vv_nxv16i32( %va, %vb) { ; CHECK-LABEL: vdiv_vv_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = sdiv %va, %vb @@ -822,7 +822,7 @@ define @vdiv_vx_nxv16i32( %va, i32 signext %b) { ; CHECK-LABEL: vdiv_vx_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -836,7 +836,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 449390 ; RV32-NEXT: addi a0, a0, -1171 -; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; RV32-NEXT: vmulh.vx v16, v8, a0 ; RV32-NEXT: vsub.vv v8, v16, v8 ; RV32-NEXT: vsrl.vi v16, v8, 31 @@ -848,7 +848,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 449390 ; RV64-NEXT: addiw a0, a0, -1171 -; RV64-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; RV64-NEXT: vmulh.vx v16, v8, a0 ; RV64-NEXT: vsub.vv v8, v16, v8 ; RV64-NEXT: vsra.vi v8, v8, 2 @@ -864,7 +864,7 @@ define @vdiv_vv_nxv1i64( %va, %vb) { ; CHECK-LABEL: vdiv_vv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = sdiv %va, %vb @@ -879,7 +879,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vdiv.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -887,7 +887,7 @@ ; ; RV64-LABEL: vdiv_vx_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV64-NEXT: vdiv.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -908,7 +908,7 @@ ; RV32-V-NEXT: addi a0, a0, 1755 ; RV32-V-NEXT: sw a0, 8(sp) ; RV32-V-NEXT: addi a0, sp, 8 -; RV32-V-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-V-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-V-NEXT: vlse64.v v9, (a0), zero ; RV32-V-NEXT: vmulh.vv v8, v8, v9 ; RV32-V-NEXT: li a0, 63 @@ -921,7 +921,7 @@ ; ZVE64X-LABEL: vdiv_vi_nxv1i64_0: ; ZVE64X: # %bb.0: ; ZVE64X-NEXT: li a0, -7 -; ZVE64X-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; ZVE64X-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; ZVE64X-NEXT: vdiv.vx v8, v8, a0 ; ZVE64X-NEXT: ret ; @@ -929,7 +929,7 @@ ; RV64-V: # %bb.0: ; RV64-V-NEXT: lui a0, %hi(.LCPI58_0) ; RV64-V-NEXT: ld a0, %lo(.LCPI58_0)(a0) -; RV64-V-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-V-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV64-V-NEXT: vmulh.vx v8, v8, a0 ; RV64-V-NEXT: li a0, 63 ; RV64-V-NEXT: vsrl.vx v9, v8, a0 @@ -945,7 +945,7 @@ define @vdiv_vv_nxv2i64( %va, %vb) { ; CHECK-LABEL: vdiv_vv_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = sdiv %va, %vb @@ -960,7 +960,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vdiv.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -968,7 +968,7 @@ ; ; 
RV64-LABEL: vdiv_vx_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV64-NEXT: vdiv.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -989,7 +989,7 @@ ; RV32-V-NEXT: addi a0, a0, 1755 ; RV32-V-NEXT: sw a0, 8(sp) ; RV32-V-NEXT: addi a0, sp, 8 -; RV32-V-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-V-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-V-NEXT: vlse64.v v10, (a0), zero ; RV32-V-NEXT: vmulh.vv v8, v8, v10 ; RV32-V-NEXT: li a0, 63 @@ -1002,7 +1002,7 @@ ; ZVE64X-LABEL: vdiv_vi_nxv2i64_0: ; ZVE64X: # %bb.0: ; ZVE64X-NEXT: li a0, -7 -; ZVE64X-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; ZVE64X-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; ZVE64X-NEXT: vdiv.vx v8, v8, a0 ; ZVE64X-NEXT: ret ; @@ -1010,7 +1010,7 @@ ; RV64-V: # %bb.0: ; RV64-V-NEXT: lui a0, %hi(.LCPI61_0) ; RV64-V-NEXT: ld a0, %lo(.LCPI61_0)(a0) -; RV64-V-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-V-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV64-V-NEXT: vmulh.vx v8, v8, a0 ; RV64-V-NEXT: li a0, 63 ; RV64-V-NEXT: vsrl.vx v10, v8, a0 @@ -1026,7 +1026,7 @@ define @vdiv_vv_nxv4i64( %va, %vb) { ; CHECK-LABEL: vdiv_vv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = sdiv %va, %vb @@ -1041,7 +1041,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vdiv.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -1049,7 +1049,7 @@ ; ; RV64-LABEL: vdiv_vx_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV64-NEXT: vdiv.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -1070,7 +1070,7 @@ ; RV32-V-NEXT: addi a0, a0, 1755 ; RV32-V-NEXT: sw a0, 8(sp) ; RV32-V-NEXT: addi a0, sp, 8 -; RV32-V-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-V-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-V-NEXT: vlse64.v v12, (a0), zero ; RV32-V-NEXT: vmulh.vv v8, v8, v12 ; RV32-V-NEXT: li a0, 63 @@ -1083,7 +1083,7 @@ ; ZVE64X-LABEL: vdiv_vi_nxv4i64_0: ; ZVE64X: # %bb.0: ; ZVE64X-NEXT: li a0, -7 -; ZVE64X-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; ZVE64X-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; ZVE64X-NEXT: vdiv.vx v8, v8, a0 ; ZVE64X-NEXT: ret ; @@ -1091,7 +1091,7 @@ ; RV64-V: # %bb.0: ; RV64-V-NEXT: lui a0, %hi(.LCPI64_0) ; RV64-V-NEXT: ld a0, %lo(.LCPI64_0)(a0) -; RV64-V-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-V-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV64-V-NEXT: vmulh.vx v8, v8, a0 ; RV64-V-NEXT: li a0, 63 ; RV64-V-NEXT: vsrl.vx v12, v8, a0 @@ -1107,7 +1107,7 @@ define @vdiv_vv_nxv8i64( %va, %vb) { ; CHECK-LABEL: vdiv_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = sdiv %va, %vb @@ -1122,7 +1122,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vdiv.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -1130,7 +1130,7 @@ ; ; RV64-LABEL: vdiv_vx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: 
vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vdiv.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -1151,7 +1151,7 @@ ; RV32-V-NEXT: addi a0, a0, 1755 ; RV32-V-NEXT: sw a0, 8(sp) ; RV32-V-NEXT: addi a0, sp, 8 -; RV32-V-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-V-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-V-NEXT: vlse64.v v16, (a0), zero ; RV32-V-NEXT: vmulh.vv v8, v8, v16 ; RV32-V-NEXT: li a0, 63 @@ -1164,7 +1164,7 @@ ; ZVE64X-LABEL: vdiv_vi_nxv8i64_0: ; ZVE64X: # %bb.0: ; ZVE64X-NEXT: li a0, -7 -; ZVE64X-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; ZVE64X-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; ZVE64X-NEXT: vdiv.vx v8, v8, a0 ; ZVE64X-NEXT: ret ; @@ -1172,7 +1172,7 @@ ; RV64-V: # %bb.0: ; RV64-V-NEXT: lui a0, %hi(.LCPI67_0) ; RV64-V-NEXT: ld a0, %lo(.LCPI67_0)(a0) -; RV64-V-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-V-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-V-NEXT: vmulh.vx v8, v8, a0 ; RV64-V-NEXT: li a0, 63 ; RV64-V-NEXT: vsrl.vx v16, v8, a0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll @@ -9,7 +9,7 @@ define @vdiv_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_nxv8i7: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v8 ; CHECK-NEXT: vsra.vi v8, v8, 1 ; CHECK-NEXT: vmv.v.x v9, a0 @@ -39,7 +39,7 @@ define @vdiv_vv_nxv1i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv1i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -63,7 +63,7 @@ define @vdiv_vx_nxv1i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_nxv1i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -89,7 +89,7 @@ define @vdiv_vv_nxv2i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -113,7 +113,7 @@ define @vdiv_vx_nxv2i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -151,7 +151,7 @@ define @vdiv_vv_nxv4i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -175,7 +175,7 @@ define @vdiv_vx_nxv4i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_nxv4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -201,7 +201,7 @@ 
define @vdiv_vv_nxv8i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -225,7 +225,7 @@ define @vdiv_vx_nxv8i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_nxv8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -251,7 +251,7 @@ define @vdiv_vv_nxv16i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -275,7 +275,7 @@ define @vdiv_vx_nxv16i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_nxv16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -301,7 +301,7 @@ define @vdiv_vv_nxv32i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv32i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -325,7 +325,7 @@ define @vdiv_vx_nxv32i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_nxv32i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -351,7 +351,7 @@ define @vdiv_vv_nxv64i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv64i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -375,7 +375,7 @@ define @vdiv_vx_nxv64i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_nxv64i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -401,7 +401,7 @@ define @vdiv_vv_nxv1i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv1i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -425,7 +425,7 @@ define @vdiv_vx_nxv1i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_nxv1i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -451,7 +451,7 @@ define @vdiv_vv_nxv2i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; 
CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -475,7 +475,7 @@ define @vdiv_vx_nxv2i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -501,7 +501,7 @@ define @vdiv_vv_nxv4i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -525,7 +525,7 @@ define @vdiv_vx_nxv4i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_nxv4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -551,7 +551,7 @@ define @vdiv_vv_nxv8i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -575,7 +575,7 @@ define @vdiv_vx_nxv8i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_nxv8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -601,7 +601,7 @@ define @vdiv_vv_nxv16i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -625,7 +625,7 @@ define @vdiv_vx_nxv16i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_nxv16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -651,7 +651,7 @@ define @vdiv_vv_nxv32i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv32i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -675,7 +675,7 @@ define @vdiv_vx_nxv32i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_nxv32i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -701,7 +701,7 @@ define @vdiv_vv_nxv1i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv1i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 
@@ -725,7 +725,7 @@ define @vdiv_vx_nxv1i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_nxv1i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -751,7 +751,7 @@ define @vdiv_vv_nxv2i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -775,7 +775,7 @@ define @vdiv_vx_nxv2i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -801,7 +801,7 @@ define @vdiv_vv_nxv4i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -825,7 +825,7 @@ define @vdiv_vx_nxv4i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_nxv4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -851,7 +851,7 @@ define @vdiv_vv_nxv8i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -875,7 +875,7 @@ define @vdiv_vx_nxv8i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_nxv8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -901,7 +901,7 @@ define @vdiv_vv_nxv16i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -925,7 +925,7 @@ define @vdiv_vx_nxv16i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vx_nxv16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vdiv.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -951,7 +951,7 @@ define @vdiv_vv_nxv1i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv1i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -968,7 +968,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu 
+; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v9, v0.t @@ -994,16 +994,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vdiv.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vx_nxv1i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vdiv.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1029,7 +1029,7 @@ define @vdiv_vv_nxv2i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1046,7 +1046,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v10, v0.t @@ -1072,16 +1072,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vdiv.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vx_nxv2i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vdiv.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1107,7 +1107,7 @@ define @vdiv_vv_nxv4i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1124,7 +1124,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v12, v0.t @@ -1150,16 +1150,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vdiv.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vx_nxv4i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vdiv.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1185,7 +1185,7 @@ define @vdiv_vv_nxv8i64_unmasked( %va, %b, 
i32 zeroext %evl) { ; CHECK-LABEL: vdiv_vv_nxv8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vdiv.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1202,7 +1202,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vdiv.vv v8, v8, v16, v0.t @@ -1228,16 +1228,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vdiv.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vdiv_vx_nxv8i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vdiv.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vdivu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vdivu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vdivu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vdivu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vdivu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vdivu_vv_nxv64i8_nxv64i8_nxv64i8( %0, 
%1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vdivu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vdivu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vdivu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vdivu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vdivu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vdivu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vdivu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vdivu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vdivu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vdivu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vdivu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vdivu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vdivu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vdivu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vdivu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vdivu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1095,7 +1095,7 @@ define @intrinsic_vdivu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vdivu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vdivu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vdivu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 
+1283,7 @@ define @intrinsic_vdivu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vdivu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vdivu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vdivu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vdivu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define @intrinsic_vdivu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vdivu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vdivu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vdivu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vdivu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ define @intrinsic_vdivu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i32_nxv4i32_i32: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ define @intrinsic_vdivu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vdivu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1898,7 +1898,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1957,7 +1957,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vdivu.vv v8, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -2016,7 +2016,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vdivu.vv v8, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -2075,7 +2075,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vdivu.vv v8, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vdivu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vdivu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vdivu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, 
e8, m1, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vdivu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vdivu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vdivu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vdivu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vdivu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vdivu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vdivu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vdivu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vdivu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vdivu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define 
@intrinsic_vdivu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vdivu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vdivu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vdivu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vdivu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vdivu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vdivu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vdivu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vdivu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1095,7 +1095,7 @@ define @intrinsic_vdivu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vdivu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vdivu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vdivu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define @intrinsic_vdivu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vdivu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vdivu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vdivu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vdivu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define @intrinsic_vdivu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vdivu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vdivu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret 
entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vdivu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vdivu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ define @intrinsic_vdivu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ define @intrinsic_vdivu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vdivu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1894,7 +1894,7 @@ define @intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1941,7 +1941,7 @@ define @intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1988,7 +1988,7 @@ define @intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2035,7 +2035,7 @@ define @intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vdivu_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll @@ -7,7 +7,7 @@ define @vdivu_vv_nxv1i8( %va, %vb) { ; CHECK-LABEL: vdivu_vv_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = udiv %va, %vb @@ 
-17,7 +17,7 @@ define @vdivu_vx_nxv1i8( %va, i8 signext %b) { ; CHECK-LABEL: vdivu_vx_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -30,7 +30,7 @@ ; CHECK-LABEL: vdivu_vi_nxv1i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 33 -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 5 ; CHECK-NEXT: ret @@ -55,7 +55,7 @@ define @vdivu_iv_nxv1i8_0( %va) { ; CHECK-LABEL: vdivu_iv_nxv1i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: ret %head = insertelement poison, i8 0, i32 0 @@ -67,7 +67,7 @@ define @vdivu_vv_nxv2i8( %va, %vb) { ; CHECK-LABEL: vdivu_vv_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = udiv %va, %vb @@ -77,7 +77,7 @@ define @vdivu_vx_nxv2i8( %va, i8 signext %b) { ; CHECK-LABEL: vdivu_vx_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -90,7 +90,7 @@ ; CHECK-LABEL: vdivu_vi_nxv2i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 33 -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 5 ; CHECK-NEXT: ret @@ -103,7 +103,7 @@ define @vdivu_vv_nxv4i8( %va, %vb) { ; CHECK-LABEL: vdivu_vv_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = udiv %va, %vb @@ -113,7 +113,7 @@ define @vdivu_vx_nxv4i8( %va, i8 signext %b) { ; CHECK-LABEL: vdivu_vx_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -126,7 +126,7 @@ ; CHECK-LABEL: vdivu_vi_nxv4i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 33 -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 5 ; CHECK-NEXT: ret @@ -139,7 +139,7 @@ define @vdivu_vv_nxv8i8( %va, %vb) { ; CHECK-LABEL: vdivu_vv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = udiv %va, %vb @@ -149,7 +149,7 @@ define @vdivu_vx_nxv8i8( %va, i8 signext %b) { ; CHECK-LABEL: vdivu_vx_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -162,7 +162,7 @@ ; CHECK-LABEL: vdivu_vi_nxv8i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 33 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 5 ; CHECK-NEXT: ret @@ -175,7 +175,7 @@ define @vdivu_vv_nxv16i8( %va, %vb) { ; CHECK-LABEL: vdivu_vv_nxv16i8: ; 
CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = udiv %va, %vb @@ -185,7 +185,7 @@ define @vdivu_vx_nxv16i8( %va, i8 signext %b) { ; CHECK-LABEL: vdivu_vx_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -198,7 +198,7 @@ ; CHECK-LABEL: vdivu_vi_nxv16i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 33 -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 5 ; CHECK-NEXT: ret @@ -211,7 +211,7 @@ define @vdivu_vv_nxv32i8( %va, %vb) { ; CHECK-LABEL: vdivu_vv_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = udiv %va, %vb @@ -221,7 +221,7 @@ define @vdivu_vx_nxv32i8( %va, i8 signext %b) { ; CHECK-LABEL: vdivu_vx_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -234,7 +234,7 @@ ; CHECK-LABEL: vdivu_vi_nxv32i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 33 -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 5 ; CHECK-NEXT: ret @@ -247,7 +247,7 @@ define @vdivu_vv_nxv64i8( %va, %vb) { ; CHECK-LABEL: vdivu_vv_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = udiv %va, %vb @@ -257,7 +257,7 @@ define @vdivu_vx_nxv64i8( %va, i8 signext %b) { ; CHECK-LABEL: vdivu_vx_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -270,7 +270,7 @@ ; CHECK-LABEL: vdivu_vi_nxv64i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 33 -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: vsrl.vi v8, v8, 5 ; CHECK-NEXT: ret @@ -283,7 +283,7 @@ define @vdivu_vv_nxv1i16( %va, %vb) { ; CHECK-LABEL: vdivu_vv_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = udiv %va, %vb @@ -293,7 +293,7 @@ define @vdivu_vx_nxv1i16( %va, i16 signext %b) { ; CHECK-LABEL: vdivu_vx_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -307,7 +307,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 2 ; RV32-NEXT: addi a0, a0, 1 -; RV32-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; RV32-NEXT: vmulhu.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 13 ; RV32-NEXT: ret @@ -316,7 +316,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 2 ; RV64-NEXT: addiw a0, a0, 1 -; RV64-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e16, mf4, ta, 
ma ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 13 ; RV64-NEXT: ret @@ -329,7 +329,7 @@ define @vdivu_vv_nxv2i16( %va, %vb) { ; CHECK-LABEL: vdivu_vv_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = udiv %va, %vb @@ -339,7 +339,7 @@ define @vdivu_vx_nxv2i16( %va, i16 signext %b) { ; CHECK-LABEL: vdivu_vx_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -353,7 +353,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 2 ; RV32-NEXT: addi a0, a0, 1 -; RV32-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; RV32-NEXT: vmulhu.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 13 ; RV32-NEXT: ret @@ -362,7 +362,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 2 ; RV64-NEXT: addiw a0, a0, 1 -; RV64-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 13 ; RV64-NEXT: ret @@ -375,7 +375,7 @@ define @vdivu_vv_nxv4i16( %va, %vb) { ; CHECK-LABEL: vdivu_vv_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = udiv %va, %vb @@ -385,7 +385,7 @@ define @vdivu_vx_nxv4i16( %va, i16 signext %b) { ; CHECK-LABEL: vdivu_vx_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -399,7 +399,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 2 ; RV32-NEXT: addi a0, a0, 1 -; RV32-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; RV32-NEXT: vmulhu.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 13 ; RV32-NEXT: ret @@ -408,7 +408,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 2 ; RV64-NEXT: addiw a0, a0, 1 -; RV64-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 13 ; RV64-NEXT: ret @@ -421,7 +421,7 @@ define @vdivu_vv_nxv8i16( %va, %vb) { ; CHECK-LABEL: vdivu_vv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = udiv %va, %vb @@ -431,7 +431,7 @@ define @vdivu_vx_nxv8i16( %va, i16 signext %b) { ; CHECK-LABEL: vdivu_vx_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -445,7 +445,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 2 ; RV32-NEXT: addi a0, a0, 1 -; RV32-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV32-NEXT: vmulhu.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 13 ; RV32-NEXT: ret @@ -454,7 +454,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 2 ; RV64-NEXT: addiw a0, a0, 1 -; RV64-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 13 ; RV64-NEXT: ret @@ -467,7 +467,7 @@ define @vdivu_vv_nxv16i16( %va, %vb) { ; CHECK-LABEL: 
vdivu_vv_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = udiv %va, %vb @@ -477,7 +477,7 @@ define @vdivu_vx_nxv16i16( %va, i16 signext %b) { ; CHECK-LABEL: vdivu_vx_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -491,7 +491,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 2 ; RV32-NEXT: addi a0, a0, 1 -; RV32-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; RV32-NEXT: vmulhu.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 13 ; RV32-NEXT: ret @@ -500,7 +500,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 2 ; RV64-NEXT: addiw a0, a0, 1 -; RV64-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 13 ; RV64-NEXT: ret @@ -513,7 +513,7 @@ define @vdivu_vv_nxv32i16( %va, %vb) { ; CHECK-LABEL: vdivu_vv_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = udiv %va, %vb @@ -523,7 +523,7 @@ define @vdivu_vx_nxv32i16( %va, i16 signext %b) { ; CHECK-LABEL: vdivu_vx_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -537,7 +537,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 2 ; RV32-NEXT: addi a0, a0, 1 -; RV32-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; RV32-NEXT: vmulhu.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 13 ; RV32-NEXT: ret @@ -546,7 +546,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 2 ; RV64-NEXT: addiw a0, a0, 1 -; RV64-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 13 ; RV64-NEXT: ret @@ -559,7 +559,7 @@ define @vdivu_vv_nxv1i32( %va, %vb) { ; CHECK-LABEL: vdivu_vv_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = udiv %va, %vb @@ -569,7 +569,7 @@ define @vdivu_vx_nxv1i32( %va, i32 signext %b) { ; CHECK-LABEL: vdivu_vx_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -583,7 +583,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 131072 ; RV32-NEXT: addi a0, a0, 1 -; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; RV32-NEXT: vmulhu.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 29 ; RV32-NEXT: ret @@ -592,7 +592,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 131072 ; RV64-NEXT: addiw a0, a0, 1 -; RV64-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 29 ; RV64-NEXT: ret @@ -605,7 +605,7 @@ define @vdivu_vv_nxv2i32( %va, %vb) { ; CHECK-LABEL: vdivu_vv_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; 
CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = udiv %va, %vb @@ -615,7 +615,7 @@ define @vdivu_vx_nxv2i32( %va, i32 signext %b) { ; CHECK-LABEL: vdivu_vx_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -629,7 +629,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 131072 ; RV32-NEXT: addi a0, a0, 1 -; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; RV32-NEXT: vmulhu.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 29 ; RV32-NEXT: ret @@ -638,7 +638,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 131072 ; RV64-NEXT: addiw a0, a0, 1 -; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 29 ; RV64-NEXT: ret @@ -651,7 +651,7 @@ define @vdivu_vv_nxv4i32( %va, %vb) { ; CHECK-LABEL: vdivu_vv_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = udiv %va, %vb @@ -661,7 +661,7 @@ define @vdivu_vx_nxv4i32( %va, i32 signext %b) { ; CHECK-LABEL: vdivu_vx_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -675,7 +675,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 131072 ; RV32-NEXT: addi a0, a0, 1 -; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; RV32-NEXT: vmulhu.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 29 ; RV32-NEXT: ret @@ -684,7 +684,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 131072 ; RV64-NEXT: addiw a0, a0, 1 -; RV64-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 29 ; RV64-NEXT: ret @@ -697,7 +697,7 @@ define @vdivu_vv_nxv8i32( %va, %vb) { ; CHECK-LABEL: vdivu_vv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = udiv %va, %vb @@ -707,7 +707,7 @@ define @vdivu_vx_nxv8i32( %va, i32 signext %b) { ; CHECK-LABEL: vdivu_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -721,7 +721,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 131072 ; RV32-NEXT: addi a0, a0, 1 -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vmulhu.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 29 ; RV32-NEXT: ret @@ -730,7 +730,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 131072 ; RV64-NEXT: addiw a0, a0, 1 -; RV64-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 29 ; RV64-NEXT: ret @@ -743,7 +743,7 @@ define @vdivu_vv_nxv16i32( %va, %vb) { ; CHECK-LABEL: vdivu_vv_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = udiv %va, %vb @@ -753,7 +753,7 @@ define @vdivu_vx_nxv16i32( %va, i32 signext 
%b) { ; CHECK-LABEL: vdivu_vx_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -767,7 +767,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 131072 ; RV32-NEXT: addi a0, a0, 1 -; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; RV32-NEXT: vmulhu.vx v8, v8, a0 ; RV32-NEXT: vsrl.vi v8, v8, 29 ; RV32-NEXT: ret @@ -776,7 +776,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 131072 ; RV64-NEXT: addiw a0, a0, 1 -; RV64-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: vsrl.vi v8, v8, 29 ; RV64-NEXT: ret @@ -789,7 +789,7 @@ define @vdivu_vv_nxv1i64( %va, %vb) { ; CHECK-LABEL: vdivu_vv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = udiv %va, %vb @@ -804,7 +804,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vdivu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -812,7 +812,7 @@ ; ; RV64-LABEL: vdivu_vx_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV64-NEXT: vdivu.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -831,7 +831,7 @@ ; RV32-V-NEXT: li a0, 1 ; RV32-V-NEXT: sw a0, 8(sp) ; RV32-V-NEXT: addi a0, sp, 8 -; RV32-V-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-V-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-V-NEXT: vlse64.v v9, (a0), zero ; RV32-V-NEXT: vmulhu.vv v8, v8, v9 ; RV32-V-NEXT: li a0, 61 @@ -842,7 +842,7 @@ ; ZVE64X-LABEL: vdivu_vi_nxv1i64_0: ; ZVE64X: # %bb.0: ; ZVE64X-NEXT: li a0, -7 -; ZVE64X-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; ZVE64X-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; ZVE64X-NEXT: vdivu.vx v8, v8, a0 ; ZVE64X-NEXT: ret ; @@ -851,7 +851,7 @@ ; RV64-V-NEXT: li a0, 1 ; RV64-V-NEXT: slli a0, a0, 61 ; RV64-V-NEXT: addi a0, a0, 1 -; RV64-V-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-V-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV64-V-NEXT: vmulhu.vx v8, v8, a0 ; RV64-V-NEXT: li a0, 61 ; RV64-V-NEXT: vsrl.vx v8, v8, a0 @@ -865,7 +865,7 @@ define @vdivu_vi_nxv1i64_1( %va) { ; CHECK-LABEL: vdivu_vi_nxv1i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 1 ; CHECK-NEXT: ret %head = insertelement poison, i64 2, i32 0 @@ -878,7 +878,7 @@ define @vdivu_vi_nxv1i64_2( %va, %vb) { ; CHECK-LABEL: vdivu_vi_nxv1i64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vadd.vi v9, v9, 4 ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret @@ -892,7 +892,7 @@ define @vdivu_vv_nxv2i64( %va, %vb) { ; CHECK-LABEL: vdivu_vv_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = udiv %va, %vb @@ -907,7 +907,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, 
ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vdivu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -915,7 +915,7 @@ ; ; RV64-LABEL: vdivu_vx_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV64-NEXT: vdivu.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -934,7 +934,7 @@ ; RV32-V-NEXT: li a0, 1 ; RV32-V-NEXT: sw a0, 8(sp) ; RV32-V-NEXT: addi a0, sp, 8 -; RV32-V-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-V-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-V-NEXT: vlse64.v v10, (a0), zero ; RV32-V-NEXT: vmulhu.vv v8, v8, v10 ; RV32-V-NEXT: li a0, 61 @@ -945,7 +945,7 @@ ; ZVE64X-LABEL: vdivu_vi_nxv2i64_0: ; ZVE64X: # %bb.0: ; ZVE64X-NEXT: li a0, -7 -; ZVE64X-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; ZVE64X-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; ZVE64X-NEXT: vdivu.vx v8, v8, a0 ; ZVE64X-NEXT: ret ; @@ -954,7 +954,7 @@ ; RV64-V-NEXT: li a0, 1 ; RV64-V-NEXT: slli a0, a0, 61 ; RV64-V-NEXT: addi a0, a0, 1 -; RV64-V-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-V-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV64-V-NEXT: vmulhu.vx v8, v8, a0 ; RV64-V-NEXT: li a0, 61 ; RV64-V-NEXT: vsrl.vx v8, v8, a0 @@ -968,7 +968,7 @@ define @vdivu_vi_nxv2i64_1( %va) { ; CHECK-LABEL: vdivu_vi_nxv2i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 1 ; CHECK-NEXT: ret %head = insertelement poison, i64 2, i32 0 @@ -981,7 +981,7 @@ define @vdivu_vi_nxv2i64_2( %va, %vb) { ; CHECK-LABEL: vdivu_vi_nxv2i64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vadd.vi v10, v10, 4 ; CHECK-NEXT: vsrl.vv v8, v8, v10 ; CHECK-NEXT: ret @@ -995,7 +995,7 @@ define @vdivu_vv_nxv4i64( %va, %vb) { ; CHECK-LABEL: vdivu_vv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = udiv %va, %vb @@ -1010,7 +1010,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vdivu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -1018,7 +1018,7 @@ ; ; RV64-LABEL: vdivu_vx_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV64-NEXT: vdivu.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -1037,7 +1037,7 @@ ; RV32-V-NEXT: li a0, 1 ; RV32-V-NEXT: sw a0, 8(sp) ; RV32-V-NEXT: addi a0, sp, 8 -; RV32-V-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-V-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-V-NEXT: vlse64.v v12, (a0), zero ; RV32-V-NEXT: vmulhu.vv v8, v8, v12 ; RV32-V-NEXT: li a0, 61 @@ -1048,7 +1048,7 @@ ; ZVE64X-LABEL: vdivu_vi_nxv4i64_0: ; ZVE64X: # %bb.0: ; ZVE64X-NEXT: li a0, -7 -; ZVE64X-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; ZVE64X-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; ZVE64X-NEXT: vdivu.vx v8, v8, a0 ; ZVE64X-NEXT: ret ; @@ -1057,7 +1057,7 @@ ; RV64-V-NEXT: li a0, 1 ; RV64-V-NEXT: slli a0, a0, 61 ; RV64-V-NEXT: addi a0, a0, 1 -; RV64-V-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-V-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV64-V-NEXT: vmulhu.vx v8, v8, a0 ; RV64-V-NEXT: li a0, 61 ; RV64-V-NEXT: vsrl.vx v8, v8, a0 @@ -1071,7 +1071,7 
@@ define @vdivu_vi_nxv4i64_1( %va) { ; CHECK-LABEL: vdivu_vi_nxv4i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 1 ; CHECK-NEXT: ret %head = insertelement poison, i64 2, i32 0 @@ -1084,7 +1084,7 @@ define @vdivu_vi_nxv4i64_2( %va, %vb) { ; CHECK-LABEL: vdivu_vi_nxv4i64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vadd.vi v12, v12, 4 ; CHECK-NEXT: vsrl.vv v8, v8, v12 ; CHECK-NEXT: ret @@ -1098,7 +1098,7 @@ define @vdivu_vv_nxv8i64( %va, %vb) { ; CHECK-LABEL: vdivu_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = udiv %va, %vb @@ -1113,7 +1113,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vdivu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -1121,7 +1121,7 @@ ; ; RV64-LABEL: vdivu_vx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vdivu.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -1140,7 +1140,7 @@ ; RV32-V-NEXT: li a0, 1 ; RV32-V-NEXT: sw a0, 8(sp) ; RV32-V-NEXT: addi a0, sp, 8 -; RV32-V-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-V-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-V-NEXT: vlse64.v v16, (a0), zero ; RV32-V-NEXT: vmulhu.vv v8, v8, v16 ; RV32-V-NEXT: li a0, 61 @@ -1151,7 +1151,7 @@ ; ZVE64X-LABEL: vdivu_vi_nxv8i64_0: ; ZVE64X: # %bb.0: ; ZVE64X-NEXT: li a0, -7 -; ZVE64X-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; ZVE64X-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; ZVE64X-NEXT: vdivu.vx v8, v8, a0 ; ZVE64X-NEXT: ret ; @@ -1160,7 +1160,7 @@ ; RV64-V-NEXT: li a0, 1 ; RV64-V-NEXT: slli a0, a0, 61 ; RV64-V-NEXT: addi a0, a0, 1 -; RV64-V-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-V-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-V-NEXT: vmulhu.vx v8, v8, a0 ; RV64-V-NEXT: li a0, 61 ; RV64-V-NEXT: vsrl.vx v8, v8, a0 @@ -1174,7 +1174,7 @@ define @vdivu_vi_nxv8i64_1( %va) { ; CHECK-LABEL: vdivu_vi_nxv8i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 1 ; CHECK-NEXT: ret %head = insertelement poison, i64 2, i32 0 @@ -1187,7 +1187,7 @@ define @vdivu_vi_nxv8i64_2( %va, %vb) { ; CHECK-LABEL: vdivu_vi_nxv8i64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vadd.vi v16, v16, 4 ; CHECK-NEXT: vsrl.vv v8, v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll @@ -10,7 +10,7 @@ ; CHECK-LABEL: vdivu_vx_nxv8i7: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 127 -; CHECK-NEXT: vsetvli a3, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e8, m1, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a2 ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vand.vx v9, v9, a2 @@ -38,7 +38,7 @@ define @vdivu_vv_nxv1i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv1i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; 
CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -62,7 +62,7 @@ define @vdivu_vx_nxv1i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_nxv1i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -88,7 +88,7 @@ define @vdivu_vv_nxv2i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -112,7 +112,7 @@ define @vdivu_vx_nxv2i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -150,7 +150,7 @@ define @vdivu_vv_nxv4i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -174,7 +174,7 @@ define @vdivu_vx_nxv4i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_nxv4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -200,7 +200,7 @@ define @vdivu_vv_nxv8i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -224,7 +224,7 @@ define @vdivu_vx_nxv8i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_nxv8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -250,7 +250,7 @@ define @vdivu_vv_nxv16i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -274,7 +274,7 @@ define @vdivu_vx_nxv16i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_nxv16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -300,7 +300,7 @@ define @vdivu_vv_nxv32i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv32i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -324,7 +324,7 @@ 
define @vdivu_vx_nxv32i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_nxv32i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -350,7 +350,7 @@ define @vdivu_vv_nxv64i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv64i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -374,7 +374,7 @@ define @vdivu_vx_nxv64i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_nxv64i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -400,7 +400,7 @@ define @vdivu_vv_nxv1i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv1i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -424,7 +424,7 @@ define @vdivu_vx_nxv1i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_nxv1i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -450,7 +450,7 @@ define @vdivu_vv_nxv2i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -474,7 +474,7 @@ define @vdivu_vx_nxv2i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -500,7 +500,7 @@ define @vdivu_vv_nxv4i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -524,7 +524,7 @@ define @vdivu_vx_nxv4i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_nxv4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -550,7 +550,7 @@ define @vdivu_vv_nxv8i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -574,7 +574,7 @@ define @vdivu_vx_nxv8i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_nxv8i16_unmasked: ; CHECK: # %bb.0: 
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -600,7 +600,7 @@ define @vdivu_vv_nxv16i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -624,7 +624,7 @@ define @vdivu_vx_nxv16i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_nxv16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -650,7 +650,7 @@ define @vdivu_vv_nxv32i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv32i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -674,7 +674,7 @@ define @vdivu_vx_nxv32i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_nxv32i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -700,7 +700,7 @@ define @vdivu_vv_nxv1i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv1i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -724,7 +724,7 @@ define @vdivu_vx_nxv1i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_nxv1i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -750,7 +750,7 @@ define @vdivu_vv_nxv2i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -774,7 +774,7 @@ define @vdivu_vx_nxv2i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -800,7 +800,7 @@ define @vdivu_vv_nxv4i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -824,7 +824,7 @@ define @vdivu_vx_nxv4i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_nxv4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: 
vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -850,7 +850,7 @@ define @vdivu_vv_nxv8i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -874,7 +874,7 @@ define @vdivu_vx_nxv8i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_nxv8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -900,7 +900,7 @@ define @vdivu_vv_nxv16i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -924,7 +924,7 @@ define @vdivu_vx_nxv16i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vx_nxv16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vdivu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -950,7 +950,7 @@ define @vdivu_vv_nxv1i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv1i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -967,7 +967,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v9, v0.t @@ -993,16 +993,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vdivu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vx_nxv1i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vdivu.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1028,7 +1028,7 @@ define @vdivu_vv_nxv2i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1045,7 +1045,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v10, v0.t @@ -1071,16 +1071,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 
8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vdivu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vx_nxv2i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vdivu.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1106,7 +1106,7 @@ define @vdivu_vv_nxv4i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1123,7 +1123,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v12, v0.t @@ -1149,16 +1149,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vdivu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vx_nxv4i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vdivu.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1184,7 +1184,7 @@ define @vdivu_vv_nxv8i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vdivu_vv_nxv8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vdivu.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1201,7 +1201,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vdivu.vv v8, v8, v16, v0.t @@ -1227,16 +1227,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vdivu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vdivu_vx_nxv8i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vdivu.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll b/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll --- a/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll @@ -9,20 +9,20 @@ define @splice_nxv1i1_offset_negone( %a, %b) #0 { ; CHECK-LABEL: splice_nxv1i1_offset_negone: 
; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v10, v9, 1, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vslidedown.vx v10, v10, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmerge.vim v8, v9, 1, v0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, ma ; CHECK-NEXT: vslideup.vi v10, v8, 1 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; CHECK-NEXT: vand.vi v8, v10, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -33,20 +33,20 @@ define @splice_nxv1i1_offset_max( %a, %b) #0 { ; CHECK-LABEL: splice_nxv1i1_offset_max: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v10, v9, 1, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vslidedown.vi v10, v10, 1 -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmerge.vim v8, v9, 1, v0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, ma ; CHECK-NEXT: vslideup.vx v10, v8, a0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; CHECK-NEXT: vand.vi v8, v10, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -59,20 +59,20 @@ define @splice_nxv2i1_offset_negone( %a, %b) #0 { ; CHECK-LABEL: splice_nxv2i1_offset_negone: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v10, v9, 1, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vx v10, v10, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmerge.vim v8, v9, 1, v0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, ma ; CHECK-NEXT: vslideup.vi v10, v8, 1 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vand.vi v8, v10, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -83,20 +83,20 @@ define @splice_nxv2i1_offset_max( %a, %b) #0 { ; CHECK-LABEL: splice_nxv2i1_offset_max: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v10, v9, 1, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -3 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v10, v10, 3 -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; 
CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmerge.vim v8, v9, 1, v0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, ma ; CHECK-NEXT: vslideup.vx v10, v8, a0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vand.vi v8, v10, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -109,20 +109,20 @@ define @splice_nxv4i1_offset_negone( %a, %b) #0 { ; CHECK-LABEL: splice_nxv4i1_offset_negone: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v10, v9, 1, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v10, v10, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmerge.vim v8, v9, 1, v0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v10, v8, 1 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; CHECK-NEXT: vand.vi v8, v10, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -133,20 +133,20 @@ define @splice_nxv4i1_offset_max( %a, %b) #0 { ; CHECK-LABEL: splice_nxv4i1_offset_max: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v10, v9, 1, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -7 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v10, v10, 7 -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmerge.vim v8, v9, 1, v0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vx v10, v8, a0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; CHECK-NEXT: vand.vi v8, v10, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -159,19 +159,19 @@ define @splice_nxv8i1_offset_negone( %a, %b) #0 { ; CHECK-LABEL: splice_nxv8i1_offset_negone: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v10, v9, 1, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v10, v10, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmerge.vim v8, v9, 1, v0 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, ma ; CHECK-NEXT: vslideup.vi v10, v8, 1 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v10, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -182,19 +182,19 @@ define @splice_nxv8i1_offset_max( %a, %b) #0 { ; CHECK-LABEL: splice_nxv8i1_offset_max: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli 
a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, 0 ; CHECK-NEXT: vmerge.vim v10, v9, 1, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: addi a0, a0, -15 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v10, v10, 15 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmerge.vim v8, v9, 1, v0 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, ma ; CHECK-NEXT: vslideup.vx v10, v8, a0 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v10, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -207,20 +207,20 @@ define @splice_nxv16i1_offset_negone( %a, %b) #0 { ; CHECK-LABEL: splice_nxv16i1_offset_negone: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: vmerge.vim v12, v10, 1, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v12, v12, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmerge.vim v8, v10, 1, v0 -; CHECK-NEXT: vsetvli zero, zero, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e8, m2, tu, ma ; CHECK-NEXT: vslideup.vi v12, v8, 1 -; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v12, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -231,20 +231,20 @@ define @splice_nxv16i1_offset_max( %a, %b) #0 { ; CHECK-LABEL: splice_nxv16i1_offset_max: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmv.v.i v10, 0 ; CHECK-NEXT: vmerge.vim v12, v10, 1, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -31 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v12, v12, 31 -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmerge.vim v8, v10, 1, v0 -; CHECK-NEXT: vsetvli zero, zero, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e8, m2, tu, ma ; CHECK-NEXT: vslideup.vx v12, v8, a0 -; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v12, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -257,20 +257,20 @@ define @splice_nxv32i1_offset_negone( %a, %b) #0 { ; CHECK-LABEL: splice_nxv32i1_offset_negone: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vmv.v.i v12, 0 ; CHECK-NEXT: vmerge.vim v16, v12, 1, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e8, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v16, v16, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmerge.vim v8, v12, 1, v0 -; CHECK-NEXT: 
vsetvli zero, zero, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e8, m4, tu, ma ; CHECK-NEXT: vslideup.vi v16, v8, 1 -; CHECK-NEXT: vsetvli zero, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v16, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -281,21 +281,21 @@ define @splice_nxv32i1_offset_max( %a, %b) #0 { ; CHECK-LABEL: splice_nxv32i1_offset_max: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vmv.v.i v12, 0 ; CHECK-NEXT: vmerge.vim v16, v12, 1, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -63 ; CHECK-NEXT: li a1, 63 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v16, v16, a1 -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmerge.vim v8, v12, 1, v0 -; CHECK-NEXT: vsetvli zero, zero, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e8, m4, tu, ma ; CHECK-NEXT: vslideup.vx v16, v8, a0 -; CHECK-NEXT: vsetvli zero, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v16, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -308,20 +308,20 @@ define @splice_nxv64i1_offset_negone( %a, %b) #0 { ; CHECK-LABEL: splice_nxv64i1_offset_negone: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vmv.v.i v16, 0 ; CHECK-NEXT: vmerge.vim v24, v16, 1, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v24, v24, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmerge.vim v8, v16, 1, v0 -; CHECK-NEXT: vsetvli zero, zero, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e8, m8, tu, ma ; CHECK-NEXT: vslideup.vi v24, v8, 1 -; CHECK-NEXT: vsetvli zero, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, m8, ta, ma ; CHECK-NEXT: vand.vi v8, v24, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -332,21 +332,21 @@ define @splice_nxv64i1_offset_max( %a, %b) #0 { ; CHECK-LABEL: splice_nxv64i1_offset_max: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vmv.v.i v16, 0 ; CHECK-NEXT: vmerge.vim v24, v16, 1, v0 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -127 ; CHECK-NEXT: li a1, 127 -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v24, v24, a1 -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vmerge.vim v8, v16, 1, v0 -; CHECK-NEXT: vsetvli zero, zero, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e8, m8, tu, ma ; CHECK-NEXT: vslideup.vx v24, v8, a0 -; CHECK-NEXT: vsetvli zero, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, m8, ta, ma ; CHECK-NEXT: vand.vi v8, v24, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -370,9 +370,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; 
CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1i8( %a, %b, i32 -1) @@ -385,9 +385,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -2 -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 2 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1i8( %a, %b, i32 -2) @@ -400,9 +400,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 1 -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1i8( %a, %b, i32 1) @@ -425,9 +425,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2i8( %a, %b, i32 -1) @@ -440,9 +440,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -4 -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 4 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2i8( %a, %b, i32 -4) @@ -455,9 +455,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -3 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 3 -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2i8( %a, %b, i32 3) @@ -480,9 +480,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4i8( %a, %b, i32 -1) @@ -495,9 +495,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -8 -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 8 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4i8( %a, 
%b, i32 -8) @@ -510,9 +510,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -7 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 7 -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4i8( %a, %b, i32 7) @@ -534,9 +534,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, m1, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8i8( %a, %b, i32 -1) @@ -548,9 +548,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: addi a0, a0, -16 -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, m1, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 16 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8i8( %a, %b, i32 -16) @@ -562,9 +562,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: addi a0, a0, -15 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 15 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8i8( %a, %b, i32 15) @@ -587,9 +587,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, m2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16i8( %a, %b, i32 -1) @@ -603,9 +603,9 @@ ; CHECK-NEXT: slli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -32 ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, m2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, tu, ma ; CHECK-NEXT: vslideup.vx v8, v10, a1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16i8( %a, %b, i32 -32) @@ -618,9 +618,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -31 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 31 -; CHECK-NEXT: vsetvli a1, zero, e8, m2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, tu, ma ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16i8( %a, %b, i32 31) @@ -643,9 +643,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e8, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, m4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, 
e8, m4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v12, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv32i8( %a, %b, i32 -1) @@ -659,9 +659,9 @@ ; CHECK-NEXT: slli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -64 ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, m4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, tu, ma ; CHECK-NEXT: vslideup.vx v8, v12, a1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv32i8( %a, %b, i32 -64) @@ -675,9 +675,9 @@ ; CHECK-NEXT: slli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -63 ; CHECK-NEXT: li a1, 63 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a1 -; CHECK-NEXT: vsetvli a1, zero, e8, m4, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, tu, ma ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv32i8( %a, %b, i32 63) @@ -700,9 +700,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, m8, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, tu, ma ; CHECK-NEXT: vslideup.vi v8, v16, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv64i8( %a, %b, i32 -1) @@ -716,9 +716,9 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -128 ; CHECK-NEXT: li a1, 128 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, m8, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, tu, ma ; CHECK-NEXT: vslideup.vx v8, v16, a1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv64i8( %a, %b, i32 -128) @@ -732,9 +732,9 @@ ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -127 ; CHECK-NEXT: li a1, 127 -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a1 -; CHECK-NEXT: vsetvli a1, zero, e8, m8, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, tu, ma ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv64i8( %a, %b, i32 127) @@ -757,9 +757,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1i16( %a, %b, i32 -1) @@ -772,9 +772,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -2 -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 2 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1i16( %a, %b, i32 -2) @@ -787,9 +787,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, 
mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 1 -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1i16( %a, %b, i32 1) @@ -812,9 +812,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2i16( %a, %b, i32 -1) @@ -827,9 +827,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -4 -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 4 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2i16( %a, %b, i32 -4) @@ -842,9 +842,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -3 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 3 -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2i16( %a, %b, i32 3) @@ -867,9 +867,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m1, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4i16( %a, %b, i32 -1) @@ -882,9 +882,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -8 -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m1, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 8 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4i16( %a, %b, i32 -8) @@ -897,9 +897,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -7 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 7 -; CHECK-NEXT: vsetvli a1, zero, e16, m1, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4i16( %a, %b, i32 7) @@ -921,9 +921,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 1 ; CHECK-NEXT: ret %res = call 
@llvm.experimental.vector.splice.nxv8i16( %a, %b, i32 -1) @@ -935,9 +935,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: addi a0, a0, -16 -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 16 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8i16( %a, %b, i32 -16) @@ -949,9 +949,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: addi a0, a0, -15 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 15 -; CHECK-NEXT: vsetvli a1, zero, e16, m2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, tu, ma ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8i16( %a, %b, i32 15) @@ -974,9 +974,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v12, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16i16( %a, %b, i32 -1) @@ -990,9 +990,9 @@ ; CHECK-NEXT: slli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -32 ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, tu, ma ; CHECK-NEXT: vslideup.vx v8, v12, a1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16i16( %a, %b, i32 -32) @@ -1005,9 +1005,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -31 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 31 -; CHECK-NEXT: vsetvli a1, zero, e16, m4, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, tu, ma ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16i16( %a, %b, i32 31) @@ -1030,9 +1030,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m8, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, tu, ma ; CHECK-NEXT: vslideup.vi v8, v16, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv32i16( %a, %b, i32 -1) @@ -1046,9 +1046,9 @@ ; CHECK-NEXT: slli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -64 ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m8, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, tu, ma ; CHECK-NEXT: vslideup.vx v8, v16, a1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv32i16( %a, %b, i32 -64) @@ -1062,9 +1062,9 @@ ; CHECK-NEXT: slli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -63 ; CHECK-NEXT: li a1, 63 -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: 
vslidedown.vx v8, v8, a1 -; CHECK-NEXT: vsetvli a1, zero, e16, m8, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, tu, ma ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv32i16( %a, %b, i32 63) @@ -1087,9 +1087,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1i32( %a, %b, i32 -1) @@ -1102,9 +1102,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -2 -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 2 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1i32( %a, %b, i32 -2) @@ -1117,9 +1117,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 1 -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1i32( %a, %b, i32 1) @@ -1142,9 +1142,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m1, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2i32( %a, %b, i32 -1) @@ -1157,9 +1157,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -4 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m1, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 4 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2i32( %a, %b, i32 -4) @@ -1172,9 +1172,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -3 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 3 -; CHECK-NEXT: vsetvli a1, zero, e32, m1, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2i32( %a, %b, i32 3) @@ -1197,9 +1197,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4i32( %a, %b, i32 -1) @@ -1212,9 
+1212,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -8 -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 8 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4i32( %a, %b, i32 -8) @@ -1227,9 +1227,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -7 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 7 -; CHECK-NEXT: vsetvli a1, zero, e32, m2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, tu, ma ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4i32( %a, %b, i32 7) @@ -1251,9 +1251,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v12, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8i32( %a, %b, i32 -1) @@ -1265,9 +1265,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: addi a0, a0, -16 -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v12, 16 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8i32( %a, %b, i32 -16) @@ -1279,9 +1279,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: addi a0, a0, -15 -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 15 -; CHECK-NEXT: vsetvli a1, zero, e32, m4, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, tu, ma ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8i32( %a, %b, i32 15) @@ -1304,9 +1304,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m8, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, tu, ma ; CHECK-NEXT: vslideup.vi v8, v16, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16i32( %a, %b, i32 -1) @@ -1320,9 +1320,9 @@ ; CHECK-NEXT: slli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -32 ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m8, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, tu, ma ; CHECK-NEXT: vslideup.vx v8, v16, a1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16i32( %a, %b, i32 -32) @@ -1335,9 +1335,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -31 -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 31 -; CHECK-NEXT: vsetvli a1, zero, e32, m8, tu, mu +; 
CHECK-NEXT: vsetvli a1, zero, e32, m8, tu, ma ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16i32( %a, %b, i32 31) @@ -1360,9 +1360,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1i64( %a, %b, i32 -1) @@ -1375,9 +1375,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -2 -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 2 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1i64( %a, %b, i32 -2) @@ -1390,9 +1390,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 1 -; CHECK-NEXT: vsetvli a1, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1i64( %a, %b, i32 1) @@ -1415,9 +1415,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2i64( %a, %b, i32 -1) @@ -1430,9 +1430,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -4 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 4 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2i64( %a, %b, i32 -4) @@ -1445,9 +1445,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -3 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 3 -; CHECK-NEXT: vsetvli a1, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, tu, ma ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2i64( %a, %b, i32 3) @@ -1470,9 +1470,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v12, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4i64( %a, %b, i32 -1) @@ -1485,9 +1485,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: 
addi a0, a0, -8 -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v12, 8 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4i64( %a, %b, i32 -8) @@ -1500,9 +1500,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -7 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 7 -; CHECK-NEXT: vsetvli a1, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m4, tu, ma ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4i64( %a, %b, i32 7) @@ -1524,9 +1524,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, tu, ma ; CHECK-NEXT: vslideup.vi v8, v16, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8i64( %a, %b, i32 -1) @@ -1538,9 +1538,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: addi a0, a0, -16 -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, tu, ma ; CHECK-NEXT: vslideup.vi v8, v16, 16 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8i64( %a, %b, i32 -16) @@ -1552,9 +1552,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: addi a0, a0, -15 -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 15 -; CHECK-NEXT: vsetvli a1, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, tu, ma ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8i64( %a, %b, i32 15) @@ -1577,9 +1577,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1f16( %a, %b, i32 -1) @@ -1592,9 +1592,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -2 -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 2 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1f16( %a, %b, i32 -2) @@ -1607,9 +1607,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 1 -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, tu, ma ; CHECK-NEXT: vslideup.vx v8, 
v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1f16( %a, %b, i32 1) @@ -1632,9 +1632,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2f16( %a, %b, i32 -1) @@ -1647,9 +1647,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -4 -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 4 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2f16( %a, %b, i32 -4) @@ -1662,9 +1662,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -3 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 3 -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2f16( %a, %b, i32 3) @@ -1687,9 +1687,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m1, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4f16( %a, %b, i32 -1) @@ -1702,9 +1702,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -8 -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m1, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 8 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4f16( %a, %b, i32 -8) @@ -1717,9 +1717,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -7 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 7 -; CHECK-NEXT: vsetvli a1, zero, e16, m1, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4f16( %a, %b, i32 7) @@ -1741,9 +1741,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8f16( %a, %b, i32 -1) @@ -1755,9 +1755,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: addi a0, a0, -16 -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli 
zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 16 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8f16( %a, %b, i32 -16) @@ -1769,9 +1769,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: addi a0, a0, -15 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 15 -; CHECK-NEXT: vsetvli a1, zero, e16, m2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, tu, ma ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8f16( %a, %b, i32 15) @@ -1794,9 +1794,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v12, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16f16( %a, %b, i32 -1) @@ -1810,9 +1810,9 @@ ; CHECK-NEXT: slli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -32 ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, tu, ma ; CHECK-NEXT: vslideup.vx v8, v12, a1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16f16( %a, %b, i32 -32) @@ -1825,9 +1825,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -31 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 31 -; CHECK-NEXT: vsetvli a1, zero, e16, m4, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, tu, ma ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16f16( %a, %b, i32 31) @@ -1850,9 +1850,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m8, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, tu, ma ; CHECK-NEXT: vslideup.vi v8, v16, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv32f16( %a, %b, i32 -1) @@ -1866,9 +1866,9 @@ ; CHECK-NEXT: slli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -64 ; CHECK-NEXT: li a1, 64 -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m8, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, tu, ma ; CHECK-NEXT: vslideup.vx v8, v16, a1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv32f16( %a, %b, i32 -64) @@ -1882,9 +1882,9 @@ ; CHECK-NEXT: slli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -63 ; CHECK-NEXT: li a1, 63 -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a1 -; CHECK-NEXT: vsetvli a1, zero, e16, m8, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, tu, ma ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %res = call 
@llvm.experimental.vector.splice.nxv32f16( %a, %b, i32 63) @@ -1907,9 +1907,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1f32( %a, %b, i32 -1) @@ -1922,9 +1922,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -2 -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 2 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1f32( %a, %b, i32 -2) @@ -1937,9 +1937,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 1 -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1f32( %a, %b, i32 1) @@ -1962,9 +1962,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m1, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2f32( %a, %b, i32 -1) @@ -1977,9 +1977,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -4 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m1, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 4 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2f32( %a, %b, i32 -4) @@ -1992,9 +1992,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -3 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 3 -; CHECK-NEXT: vsetvli a1, zero, e32, m1, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2f32( %a, %b, i32 3) @@ -2017,9 +2017,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4f32( %a, %b, i32 -1) @@ -2032,9 +2032,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -8 -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, 
ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 8 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4f32( %a, %b, i32 -8) @@ -2047,9 +2047,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -7 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 7 -; CHECK-NEXT: vsetvli a1, zero, e32, m2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, tu, ma ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4f32( %a, %b, i32 7) @@ -2071,9 +2071,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v12, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8f32( %a, %b, i32 -1) @@ -2085,9 +2085,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: addi a0, a0, -16 -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v12, 16 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8f32( %a, %b, i32 -16) @@ -2099,9 +2099,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: addi a0, a0, -15 -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 15 -; CHECK-NEXT: vsetvli a1, zero, e32, m4, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, tu, ma ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8f32( %a, %b, i32 15) @@ -2124,9 +2124,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m8, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, tu, ma ; CHECK-NEXT: vslideup.vi v8, v16, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16f32( %a, %b, i32 -1) @@ -2140,9 +2140,9 @@ ; CHECK-NEXT: slli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -32 ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m8, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, tu, ma ; CHECK-NEXT: vslideup.vx v8, v16, a1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16f32( %a, %b, i32 -32) @@ -2155,9 +2155,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -31 -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 31 -; CHECK-NEXT: vsetvli a1, zero, e32, m8, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, tu, ma ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv16f32( %a, %b, i32 31) @@ -2180,9 +2180,9 
@@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1f64( %a, %b, i32 -1) @@ -2195,9 +2195,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -2 -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 2 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1f64( %a, %b, i32 -2) @@ -2210,9 +2210,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 3 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 1 -; CHECK-NEXT: vsetvli a1, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv1f64( %a, %b, i32 1) @@ -2235,9 +2235,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2f64( %a, %b, i32 -1) @@ -2250,9 +2250,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -4 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 4 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2f64( %a, %b, i32 -4) @@ -2265,9 +2265,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: addi a0, a0, -3 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 3 -; CHECK-NEXT: vsetvli a1, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, tu, ma ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv2f64( %a, %b, i32 3) @@ -2290,9 +2290,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v12, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4f64( %a, %b, i32 -1) @@ -2305,9 +2305,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -8 -; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m4, 
tu, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v12, 8 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4f64( %a, %b, i32 -8) @@ -2320,9 +2320,9 @@ ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -7 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 7 -; CHECK-NEXT: vsetvli a1, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m4, tu, ma ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv4f64( %a, %b, i32 7) @@ -2344,9 +2344,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: addi a0, a0, -1 -; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, tu, ma ; CHECK-NEXT: vslideup.vi v8, v16, 1 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8f64( %a, %b, i32 -1) @@ -2358,9 +2358,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: addi a0, a0, -16 -; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 -; CHECK-NEXT: vsetvli a0, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, tu, ma ; CHECK-NEXT: vslideup.vi v8, v16, 16 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8f64( %a, %b, i32 -16) @@ -2372,9 +2372,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: addi a0, a0, -15 -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v8, 15 -; CHECK-NEXT: vsetvli a1, zero, e64, m8, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, tu, ma ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.splice.nxv8f64( %a, %b, i32 15) diff --git a/llvm/test/CodeGen/RISCV/rvv/vexts-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vexts-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vexts-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vexts-sdnode.ll @@ -5,7 +5,7 @@ define @vsext_nxv1i8_nxv1i16( %va) { ; CHECK-LABEL: vsext_nxv1i8_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vsext.vf2 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -16,7 +16,7 @@ define @vzext_nxv1i8_nxv1i16( %va) { ; CHECK-LABEL: vzext_nxv1i8_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vzext.vf2 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -27,7 +27,7 @@ define @vsext_nxv1i8_nxv1i32( %va) { ; CHECK-LABEL: vsext_nxv1i8_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vsext.vf4 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -38,7 +38,7 @@ define @vzext_nxv1i8_nxv1i32( %va) { ; CHECK-LABEL: vzext_nxv1i8_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vzext.vf4 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -49,7 +49,7 @@ define @vsext_nxv1i8_nxv1i64( %va) { ; CHECK-LABEL: vsext_nxv1i8_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; 
CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vsext.vf8 v9, v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -60,7 +60,7 @@ define @vzext_nxv1i8_nxv1i64( %va) { ; CHECK-LABEL: vzext_nxv1i8_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vzext.vf8 v9, v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -71,7 +71,7 @@ define @vsext_nxv2i8_nxv2i16( %va) { ; CHECK-LABEL: vsext_nxv2i8_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vsext.vf2 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -82,7 +82,7 @@ define @vzext_nxv2i8_nxv2i16( %va) { ; CHECK-LABEL: vzext_nxv2i8_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vzext.vf2 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -93,7 +93,7 @@ define @vsext_nxv2i8_nxv2i32( %va) { ; CHECK-LABEL: vsext_nxv2i8_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vsext.vf4 v9, v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -104,7 +104,7 @@ define @vzext_nxv2i8_nxv2i32( %va) { ; CHECK-LABEL: vzext_nxv2i8_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vzext.vf4 v9, v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -115,7 +115,7 @@ define @vsext_nxv2i8_nxv2i64( %va) { ; CHECK-LABEL: vsext_nxv2i8_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vsext.vf8 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -126,7 +126,7 @@ define @vzext_nxv2i8_nxv2i64( %va) { ; CHECK-LABEL: vzext_nxv2i8_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vzext.vf8 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -137,7 +137,7 @@ define @vsext_nxv4i8_nxv4i16( %va) { ; CHECK-LABEL: vsext_nxv4i8_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vsext.vf2 v9, v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -148,7 +148,7 @@ define @vzext_nxv4i8_nxv4i16( %va) { ; CHECK-LABEL: vzext_nxv4i8_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vzext.vf2 v9, v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -159,7 +159,7 @@ define @vsext_nxv4i8_nxv4i32( %va) { ; CHECK-LABEL: vsext_nxv4i8_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vsext.vf4 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -170,7 +170,7 @@ define @vzext_nxv4i8_nxv4i32( %va) { ; CHECK-LABEL: vzext_nxv4i8_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vzext.vf4 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -181,7 +181,7 @@ define @vsext_nxv4i8_nxv4i64( %va) { ; CHECK-LABEL: vsext_nxv4i8_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vsext.vf8 v12, v8 ; CHECK-NEXT: 
vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -192,7 +192,7 @@ define @vzext_nxv4i8_nxv4i64( %va) { ; CHECK-LABEL: vzext_nxv4i8_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vzext.vf8 v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -203,7 +203,7 @@ define @vsext_nxv8i8_nxv8i16( %va) { ; CHECK-LABEL: vsext_nxv8i8_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vsext.vf2 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -214,7 +214,7 @@ define @vzext_nxv8i8_nxv8i16( %va) { ; CHECK-LABEL: vzext_nxv8i8_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vzext.vf2 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -225,7 +225,7 @@ define @vsext_nxv8i8_nxv8i32( %va) { ; CHECK-LABEL: vsext_nxv8i8_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vsext.vf4 v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -236,7 +236,7 @@ define @vzext_nxv8i8_nxv8i32( %va) { ; CHECK-LABEL: vzext_nxv8i8_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vzext.vf4 v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -247,7 +247,7 @@ define @vsext_nxv8i8_nxv8i64( %va) { ; CHECK-LABEL: vsext_nxv8i8_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vsext.vf8 v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -258,7 +258,7 @@ define @vzext_nxv8i8_nxv8i64( %va) { ; CHECK-LABEL: vzext_nxv8i8_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vzext.vf8 v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -269,7 +269,7 @@ define @vsext_nxv16i8_nxv16i16( %va) { ; CHECK-LABEL: vsext_nxv16i8_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vsext.vf2 v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -280,7 +280,7 @@ define @vzext_nxv16i8_nxv16i16( %va) { ; CHECK-LABEL: vzext_nxv16i8_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vzext.vf2 v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -291,7 +291,7 @@ define @vsext_nxv16i8_nxv16i32( %va) { ; CHECK-LABEL: vsext_nxv16i8_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vsext.vf4 v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -302,7 +302,7 @@ define @vzext_nxv16i8_nxv16i32( %va) { ; CHECK-LABEL: vzext_nxv16i8_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vzext.vf4 v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -313,7 +313,7 @@ define @vsext_nxv32i8_nxv32i16( %va) { ; CHECK-LABEL: vsext_nxv32i8_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vsext.vf2 v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -324,7 +324,7 @@ 
define @vzext_nxv32i8_nxv32i16( %va) { ; CHECK-LABEL: vzext_nxv32i8_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vzext.vf2 v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -335,7 +335,7 @@ define @vsext_nxv1i16_nxv1i32( %va) { ; CHECK-LABEL: vsext_nxv1i16_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vsext.vf2 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -346,7 +346,7 @@ define @vzext_nxv1i16_nxv1i32( %va) { ; CHECK-LABEL: vzext_nxv1i16_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vzext.vf2 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -357,7 +357,7 @@ define @vsext_nxv1i16_nxv1i64( %va) { ; CHECK-LABEL: vsext_nxv1i16_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vsext.vf4 v9, v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -368,7 +368,7 @@ define @vzext_nxv1i16_nxv1i64( %va) { ; CHECK-LABEL: vzext_nxv1i16_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vzext.vf4 v9, v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -379,7 +379,7 @@ define @vsext_nxv2i16_nxv2i32( %va) { ; CHECK-LABEL: vsext_nxv2i16_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vsext.vf2 v9, v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -390,7 +390,7 @@ define @vzext_nxv2i16_nxv2i32( %va) { ; CHECK-LABEL: vzext_nxv2i16_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vzext.vf2 v9, v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -401,7 +401,7 @@ define @vsext_nxv2i16_nxv2i64( %va) { ; CHECK-LABEL: vsext_nxv2i16_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vsext.vf4 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -412,7 +412,7 @@ define @vzext_nxv2i16_nxv2i64( %va) { ; CHECK-LABEL: vzext_nxv2i16_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vzext.vf4 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -423,7 +423,7 @@ define @vsext_nxv4i16_nxv4i32( %va) { ; CHECK-LABEL: vsext_nxv4i16_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vsext.vf2 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -434,7 +434,7 @@ define @vzext_nxv4i16_nxv4i32( %va) { ; CHECK-LABEL: vzext_nxv4i16_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vzext.vf2 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -445,7 +445,7 @@ define @vsext_nxv4i16_nxv4i64( %va) { ; CHECK-LABEL: vsext_nxv4i16_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vsext.vf4 v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -456,7 +456,7 @@ define @vzext_nxv4i16_nxv4i64( %va) { ; CHECK-LABEL: 
vzext_nxv4i16_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vzext.vf4 v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -467,7 +467,7 @@ define @vsext_nxv8i16_nxv8i32( %va) { ; CHECK-LABEL: vsext_nxv8i16_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vsext.vf2 v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -478,7 +478,7 @@ define @vzext_nxv8i16_nxv8i32( %va) { ; CHECK-LABEL: vzext_nxv8i16_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vzext.vf2 v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -489,7 +489,7 @@ define @vsext_nxv8i16_nxv8i64( %va) { ; CHECK-LABEL: vsext_nxv8i16_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vsext.vf4 v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -500,7 +500,7 @@ define @vzext_nxv8i16_nxv8i64( %va) { ; CHECK-LABEL: vzext_nxv8i16_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vzext.vf4 v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -511,7 +511,7 @@ define @vsext_nxv16i16_nxv16i32( %va) { ; CHECK-LABEL: vsext_nxv16i16_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vsext.vf2 v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -522,7 +522,7 @@ define @vzext_nxv16i16_nxv16i32( %va) { ; CHECK-LABEL: vzext_nxv16i16_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vzext.vf2 v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -533,7 +533,7 @@ define @vsext_nxv1i32_nxv1i64( %va) { ; CHECK-LABEL: vsext_nxv1i32_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vsext.vf2 v9, v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -544,7 +544,7 @@ define @vzext_nxv1i32_nxv1i64( %va) { ; CHECK-LABEL: vzext_nxv1i32_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vzext.vf2 v9, v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -555,7 +555,7 @@ define @vsext_nxv2i32_nxv2i64( %va) { ; CHECK-LABEL: vsext_nxv2i32_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vsext.vf2 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -566,7 +566,7 @@ define @vzext_nxv2i32_nxv2i64( %va) { ; CHECK-LABEL: vzext_nxv2i32_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vzext.vf2 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -577,7 +577,7 @@ define @vsext_nxv4i32_nxv4i64( %va) { ; CHECK-LABEL: vsext_nxv4i32_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vsext.vf2 v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -588,7 +588,7 @@ define @vzext_nxv4i32_nxv4i64( %va) { ; CHECK-LABEL: vzext_nxv4i32_nxv4i64: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vzext.vf2 v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -599,7 +599,7 @@ define @vsext_nxv8i32_nxv8i64( %va) { ; CHECK-LABEL: vsext_nxv8i32_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vsext.vf2 v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -610,7 +610,7 @@ define @vzext_nxv8i32_nxv8i64( %va) { ; CHECK-LABEL: vzext_nxv8i32_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vzext.vf2 v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll @@ -9,7 +9,7 @@ define @vfabs_nxv1f16( %v) { ; CHECK-LABEL: vfabs_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv1f16( %v) @@ -21,7 +21,7 @@ define @vfabs_nxv2f16( %v) { ; CHECK-LABEL: vfabs_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv2f16( %v) @@ -33,7 +33,7 @@ define @vfabs_nxv4f16( %v) { ; CHECK-LABEL: vfabs_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv4f16( %v) @@ -45,7 +45,7 @@ define @vfabs_nxv8f16( %v) { ; CHECK-LABEL: vfabs_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv8f16( %v) @@ -57,7 +57,7 @@ define @vfabs_nxv16f16( %v) { ; CHECK-LABEL: vfabs_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv16f16( %v) @@ -69,7 +69,7 @@ define @vfabs_nxv32f16( %v) { ; CHECK-LABEL: vfabs_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv32f16( %v) @@ -81,7 +81,7 @@ define @vfabs_nxv1f32( %v) { ; CHECK-LABEL: vfabs_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv1f32( %v) @@ -93,7 +93,7 @@ define @vfabs_nxv2f32( %v) { ; CHECK-LABEL: vfabs_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv2f32( %v) @@ -105,7 +105,7 @@ define @vfabs_nxv4f32( %v) { ; CHECK-LABEL: vfabs_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv4f32( %v) @@ -117,7 +117,7 @@ define @vfabs_nxv8f32( %v) { ; CHECK-LABEL: vfabs_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: 
vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv8f32( %v) @@ -129,7 +129,7 @@ define @vfabs_nxv16f32( %v) { ; CHECK-LABEL: vfabs_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv16f32( %v) @@ -141,7 +141,7 @@ define @vfabs_nxv1f64( %v) { ; CHECK-LABEL: vfabs_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv1f64( %v) @@ -153,7 +153,7 @@ define @vfabs_nxv2f64( %v) { ; CHECK-LABEL: vfabs_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv2f64( %v) @@ -165,7 +165,7 @@ define @vfabs_nxv4f64( %v) { ; CHECK-LABEL: vfabs_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv4f64( %v) @@ -177,7 +177,7 @@ define @vfabs_nxv8f64( %v) { ; CHECK-LABEL: vfabs_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.fabs.nxv8f64( %v) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll @@ -19,7 +19,7 @@ define @vfabs_vv_nxv1f16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -43,7 +43,7 @@ define @vfabs_vv_nxv2f16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -67,7 +67,7 @@ define @vfabs_vv_nxv4f16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -91,7 +91,7 @@ define @vfabs_vv_nxv8f16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -115,7 +115,7 @@ define @vfabs_vv_nxv16f16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -139,7 +139,7 @@ define @vfabs_vv_nxv32f16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv32f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, 
a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -163,7 +163,7 @@ define @vfabs_vv_nxv1f32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -187,7 +187,7 @@ define @vfabs_vv_nxv2f32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -211,7 +211,7 @@ define @vfabs_vv_nxv4f32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -235,7 +235,7 @@ define @vfabs_vv_nxv8f32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -259,7 +259,7 @@ define @vfabs_vv_nxv16f32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -283,7 +283,7 @@ define @vfabs_vv_nxv1f64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -307,7 +307,7 @@ define @vfabs_vv_nxv2f64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -331,7 +331,7 @@ define @vfabs_vv_nxv4f64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -355,7 +355,7 @@ define @vfabs_vv_nxv7f64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv7f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -379,7 +379,7 @@ define @vfabs_vv_nxv8f64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfabs_vv_nxv8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -398,7 +398,7 @@ ; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: 
srli a4, a1, 3 -; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma ; CHECK-NEXT: sub a3, a0, a1 ; CHECK-NEXT: vslidedown.vx v0, v0, a4 ; CHECK-NEXT: bltu a0, a3, .LBB32_2 @@ -429,14 +429,14 @@ ; CHECK-NEXT: mv a2, a1 ; CHECK-NEXT: .LBB33_2: ; CHECK-NEXT: li a3, 0 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: sub a1, a0, a1 ; CHECK-NEXT: vfabs.v v8, v8 ; CHECK-NEXT: bltu a0, a1, .LBB33_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: mv a3, a1 ; CHECK-NEXT: .LBB33_4: -; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; CHECK-NEXT: vfabs.v v16, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode.ll @@ -7,7 +7,7 @@ define @vfadd_vv_nxv1f16( %va, %vb) { ; CHECK-LABEL: vfadd_vv_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = fadd %va, %vb @@ -17,7 +17,7 @@ define @vfadd_vf_nxv1f16( %va, half %b) { ; CHECK-LABEL: vfadd_vf_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -29,7 +29,7 @@ define @vfadd_vv_nxv2f16( %va, %vb) { ; CHECK-LABEL: vfadd_vv_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = fadd %va, %vb @@ -39,7 +39,7 @@ define @vfadd_vf_nxv2f16( %va, half %b) { ; CHECK-LABEL: vfadd_vf_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -51,7 +51,7 @@ define @vfadd_vv_nxv4f16( %va, %vb) { ; CHECK-LABEL: vfadd_vv_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = fadd %va, %vb @@ -61,7 +61,7 @@ define @vfadd_vf_nxv4f16( %va, half %b) { ; CHECK-LABEL: vfadd_vf_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -73,7 +73,7 @@ define @vfadd_vv_nxv8f16( %va, %vb) { ; CHECK-LABEL: vfadd_vv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = fadd %va, %vb @@ -83,7 +83,7 @@ define @vfadd_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: vfadd_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -95,7 +95,7 @@ define @vfadd_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: vfadd_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 
; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -107,7 +107,7 @@ define @vfadd_vv_nxv16f16( %va, %vb) { ; CHECK-LABEL: vfadd_vv_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = fadd %va, %vb @@ -117,7 +117,7 @@ define @vfadd_vf_nxv16f16( %va, half %b) { ; CHECK-LABEL: vfadd_vf_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -129,7 +129,7 @@ define @vfadd_vv_nxv32f16( %va, %vb) { ; CHECK-LABEL: vfadd_vv_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = fadd %va, %vb @@ -139,7 +139,7 @@ define @vfadd_vf_nxv32f16( %va, half %b) { ; CHECK-LABEL: vfadd_vf_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -151,7 +151,7 @@ define @vfadd_vv_nxv1f32( %va, %vb) { ; CHECK-LABEL: vfadd_vv_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = fadd %va, %vb @@ -161,7 +161,7 @@ define @vfadd_vf_nxv1f32( %va, float %b) { ; CHECK-LABEL: vfadd_vf_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -173,7 +173,7 @@ define @vfadd_vv_nxv2f32( %va, %vb) { ; CHECK-LABEL: vfadd_vv_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = fadd %va, %vb @@ -183,7 +183,7 @@ define @vfadd_vf_nxv2f32( %va, float %b) { ; CHECK-LABEL: vfadd_vf_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -195,7 +195,7 @@ define @vfadd_vv_nxv4f32( %va, %vb) { ; CHECK-LABEL: vfadd_vv_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = fadd %va, %vb @@ -205,7 +205,7 @@ define @vfadd_vf_nxv4f32( %va, float %b) { ; CHECK-LABEL: vfadd_vf_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -217,7 +217,7 @@ define @vfadd_vv_nxv8f32( %va, %vb) { ; CHECK-LABEL: vfadd_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = fadd %va, %vb @@ -227,7 +227,7 @@ define @vfadd_vf_nxv8f32( %va, float %b) { ; CHECK-LABEL: vfadd_vf_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; 
CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -239,7 +239,7 @@ define @vfadd_fv_nxv8f32( %va, float %b) { ; CHECK-LABEL: vfadd_fv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -251,7 +251,7 @@ define @vfadd_vv_nxv16f32( %va, %vb) { ; CHECK-LABEL: vfadd_vv_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = fadd %va, %vb @@ -261,7 +261,7 @@ define @vfadd_vf_nxv16f32( %va, float %b) { ; CHECK-LABEL: vfadd_vf_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -273,7 +273,7 @@ define @vfadd_vv_nxv1f64( %va, %vb) { ; CHECK-LABEL: vfadd_vv_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = fadd %va, %vb @@ -283,7 +283,7 @@ define @vfadd_vf_nxv1f64( %va, double %b) { ; CHECK-LABEL: vfadd_vf_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -295,7 +295,7 @@ define @vfadd_vv_nxv2f64( %va, %vb) { ; CHECK-LABEL: vfadd_vv_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = fadd %va, %vb @@ -305,7 +305,7 @@ define @vfadd_vf_nxv2f64( %va, double %b) { ; CHECK-LABEL: vfadd_vf_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -317,7 +317,7 @@ define @vfadd_vv_nxv4f64( %va, %vb) { ; CHECK-LABEL: vfadd_vv_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = fadd %va, %vb @@ -327,7 +327,7 @@ define @vfadd_vf_nxv4f64( %va, double %b) { ; CHECK-LABEL: vfadd_vf_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -339,7 +339,7 @@ define @vfadd_vv_nxv8f64( %va, %vb) { ; CHECK-LABEL: vfadd_vv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = fadd %va, %vb @@ -349,7 +349,7 @@ define @vfadd_vf_nxv8f64( %va, double %b) { ; CHECK-LABEL: vfadd_vf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -361,7 +361,7 @@ define @vfadd_fv_nxv8f64( %va, double %b) { ; CHECK-LABEL: vfadd_fv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; 
CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll @@ -19,7 +19,7 @@ define @vfadd_vv_nxv1f16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -55,7 +55,7 @@ define @vfadd_vf_nxv1f16_unmasked( %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vf_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -69,7 +69,7 @@ define @vfadd_vf_nxv1f16_unmasked_commute( %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vf_nxv1f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -95,7 +95,7 @@ define @vfadd_vv_nxv2f16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -119,7 +119,7 @@ define @vfadd_vf_nxv2f16_unmasked( %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vf_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -145,7 +145,7 @@ define @vfadd_vv_nxv4f16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -169,7 +169,7 @@ define @vfadd_vf_nxv4f16_unmasked( %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vf_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -195,7 +195,7 @@ define @vfadd_vv_nxv8f16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -219,7 +219,7 @@ define @vfadd_vf_nxv8f16_unmasked( %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vf_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -245,7 +245,7 @@ define @vfadd_vv_nxv16f16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_nxv16f16_unmasked: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -269,7 +269,7 @@ define @vfadd_vf_nxv16f16_unmasked( %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vf_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -295,7 +295,7 @@ define @vfadd_vv_nxv32f16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_nxv32f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -319,7 +319,7 @@ define @vfadd_vf_nxv32f16_unmasked( %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vf_nxv32f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -345,7 +345,7 @@ define @vfadd_vv_nxv1f32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -369,7 +369,7 @@ define @vfadd_vf_nxv1f32_unmasked( %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vf_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -395,7 +395,7 @@ define @vfadd_vv_nxv2f32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -419,7 +419,7 @@ define @vfadd_vf_nxv2f32_unmasked( %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vf_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -445,7 +445,7 @@ define @vfadd_vv_nxv4f32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -469,7 +469,7 @@ define @vfadd_vf_nxv4f32_unmasked( %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vf_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -495,7 +495,7 @@ define @vfadd_vv_nxv8f32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma 
; CHECK-NEXT: vfadd.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -519,7 +519,7 @@ define @vfadd_vf_nxv8f32_unmasked( %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vf_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -545,7 +545,7 @@ define @vfadd_vv_nxv16f32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_nxv16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -569,7 +569,7 @@ define @vfadd_vf_nxv16f32_unmasked( %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vf_nxv16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -595,7 +595,7 @@ define @vfadd_vv_nxv1f64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -619,7 +619,7 @@ define @vfadd_vf_nxv1f64_unmasked( %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vf_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -645,7 +645,7 @@ define @vfadd_vv_nxv2f64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -669,7 +669,7 @@ define @vfadd_vf_nxv2f64_unmasked( %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vf_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -695,7 +695,7 @@ define @vfadd_vv_nxv4f64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -719,7 +719,7 @@ define @vfadd_vf_nxv4f64_unmasked( %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vf_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -757,7 +757,7 @@ define @vfadd_vv_nxv8f64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vv_nxv8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 
true, i32 0 @@ -781,7 +781,7 @@ define @vfadd_vf_nxv8f64_unmasked( %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfadd_vf_nxv8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfadd.ll @@ -12,7 +12,7 @@ define @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -59,7 +59,7 @@ define @intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -106,7 +106,7 @@ define @intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -153,7 +153,7 @@ define @intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -200,7 +200,7 @@ define @intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -247,7 +247,7 @@ define @intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -295,7 +295,7 @@ define @intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -342,7 +342,7 @@ define @intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -389,7 +389,7 @@ define @intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; 
CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -436,7 +436,7 @@ define @intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -483,7 +483,7 @@ define @intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -531,7 +531,7 @@ define @intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -578,7 +578,7 @@ define @intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -625,7 +625,7 @@ define @intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -672,7 +672,7 @@ define @intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -720,7 +720,7 @@ define @intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -767,7 +767,7 @@ define @intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -814,7 +814,7 @@ define @intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -861,7 +861,7 @@ define @intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: 
@@ -908,7 +908,7 @@ define @intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -955,7 +955,7 @@ define @intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1002,7 +1002,7 @@ define @intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1049,7 +1049,7 @@ define @intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1096,7 +1096,7 @@ define @intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1143,7 +1143,7 @@ define @intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1190,7 +1190,7 @@ define @intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1237,7 +1237,7 @@ define @intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1284,7 +1284,7 @@ define @intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1331,7 +1331,7 @@ define @intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1378,7 +1378,7 @@ define @intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen 
%2) nounwind { ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfadd.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfclass.ll b/llvm/test/CodeGen/RISCV/rvv/vfclass.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfclass.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfclass.ll @@ -11,7 +11,7 @@ define @intrinsic_vfclass_v_nxv1i16_nxv1f16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, @@ -59,7 +59,7 @@ define @intrinsic_vfclass_v_nxv2i16_nxv2f16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, @@ -107,7 +107,7 @@ define @intrinsic_vfclass_v_nxv4i16_nxv4f16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, @@ -155,7 +155,7 @@ define @intrinsic_vfclass_v_nxv8i16_nxv8f16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, @@ -203,7 +203,7 @@ define @intrinsic_vfclass_v_nxv16i16_nxv16f16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, @@ -251,7 +251,7 @@ define @intrinsic_vfclass_v_nxv32i16_nxv32f16( ; CHECK-LABEL: intrinsic_vfclass_v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, @@ -299,7 +299,7 @@ define @intrinsic_vfclass_v_nxv1i32_nxv1f32( ; CHECK-LABEL: intrinsic_vfclass_v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, @@ -347,7 +347,7 @@ define @intrinsic_vfclass_v_nxv2i32_nxv2f32( ; CHECK-LABEL: intrinsic_vfclass_v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, @@ -395,7 +395,7 @@ define @intrinsic_vfclass_v_nxv4i32_nxv4f32( ; CHECK-LABEL: intrinsic_vfclass_v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, @@ -443,7 +443,7 @@ define @intrinsic_vfclass_v_nxv8i32_nxv8f32( ; CHECK-LABEL: intrinsic_vfclass_v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, @@ -491,7 +491,7 @@ define @intrinsic_vfclass_v_nxv16i32_nxv16f32( ; CHECK-LABEL: intrinsic_vfclass_v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, @@ -539,7 +539,7 @@ define @intrinsic_vfclass_v_nxv1i64_nxv1f64( ; CHECK-LABEL: intrinsic_vfclass_v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, @@ -587,7 +587,7 @@ define @intrinsic_vfclass_v_nxv2i64_nxv2f64( ; CHECK-LABEL: intrinsic_vfclass_v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, @@ -635,7 +635,7 @@ define @intrinsic_vfclass_v_nxv4i64_nxv4f64( ; CHECK-LABEL: intrinsic_vfclass_v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, @@ -683,7 +683,7 @@ define @intrinsic_vfclass_v_nxv8i64_nxv8f64( ; CHECK-LABEL: intrinsic_vfclass_v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfclass.v v8, v8 ; CHECK-NEXT: ret %0, diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll @@ -9,7 +9,7 @@ define @vfcopysign_vv_nxv1f16( %vm, %vs) { ; CHECK-LABEL: vfcopysign_vv_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 ; CHECK-NEXT: ret %r = call @llvm.copysign.nxv1f16( %vm, %vs) @@ -19,7 +19,7 @@ define @vfcopysign_vf_nxv1f16( %vm, half %s) { ; CHECK-LABEL: vfcopysign_vf_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %s, i32 0 @@ -31,7 +31,7 @@ define @vfcopynsign_vv_nxv1f16( %vm, %vs) { ; CHECK-LABEL: vfcopynsign_vv_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 ; CHECK-NEXT: ret %n = fneg %vs @@ -42,7 +42,7 @@ define @vfcopynsign_vf_nxv1f16( %vm, half %s) { ; CHECK-LABEL: vfcopynsign_vf_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %s, i32 0 @@ -55,7 +55,7 @@ define @vfcopysign_exttrunc_vv_nxv1f16_nxv1f32( %vm, %vs) { ; CHECK-LABEL: vfcopysign_exttrunc_vv_nxv1f16_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v10, v9 ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 ; CHECK-NEXT: ret @@ -67,9 +67,9 @@ define @vfcopysign_exttrunc_vf_nxv1f16_nxv1f32( %vm, float %s) { ; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv1f16_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfmv.v.f v9, fa0 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.f.f.w 
v10, v9 ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 ; CHECK-NEXT: ret @@ -83,7 +83,7 @@ define @vfcopynsign_exttrunc_vv_nxv1f16_nxv1f32( %vm, %vs) { ; CHECK-LABEL: vfcopynsign_exttrunc_vv_nxv1f16_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v10, v9 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 ; CHECK-NEXT: ret @@ -96,9 +96,9 @@ define @vfcopynsign_exttrunc_vf_nxv1f16_nxv1f32( %vm, float %s) { ; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv1f16_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfmv.v.f v9, fa0 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v10, v9 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 ; CHECK-NEXT: ret @@ -113,9 +113,9 @@ define @vfcopysign_exttrunc_vv_nxv1f16_nxv1f64( %vm, %vs) { ; CHECK-LABEL: vfcopysign_exttrunc_vv_nxv1f16_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.rod.f.f.w v10, v9 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v9, v10 ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 ; CHECK-NEXT: ret @@ -127,11 +127,11 @@ define @vfcopysign_exttrunc_vf_nxv1f16_nxv1f64( %vm, double %s) { ; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv1f16_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfmv.v.f v9, fa0 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.rod.f.f.w v10, v9 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v9, v10 ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 ; CHECK-NEXT: ret @@ -145,9 +145,9 @@ define @vfcopynsign_exttrunc_vv_nxv1f16_nxv1f64( %vm, %vs) { ; CHECK-LABEL: vfcopynsign_exttrunc_vv_nxv1f16_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.rod.f.f.w v10, v9 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v9, v10 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 ; CHECK-NEXT: ret @@ -160,11 +160,11 @@ define @vfcopynsign_exttrunc_vf_nxv1f16_nxv1f64( %vm, double %s) { ; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv1f16_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfmv.v.f v9, fa0 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.rod.f.f.w v10, v9 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v9, v10 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 ; CHECK-NEXT: ret @@ -181,7 +181,7 @@ define @vfcopysign_vv_nxv2f16( %vm, %vs) { ; CHECK-LABEL: vfcopysign_vv_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 ; CHECK-NEXT: ret %r = call @llvm.copysign.nxv2f16( %vm, %vs) @@ -191,7 +191,7 @@ define @vfcopysign_vf_nxv2f16( %vm, half %s) { ; CHECK-LABEL: vfcopysign_vf_nxv2f16: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %s, i32 0 @@ -203,7 +203,7 @@ define @vfcopynsign_vv_nxv2f16( %vm, %vs) { ; CHECK-LABEL: vfcopynsign_vv_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 ; CHECK-NEXT: ret %n = fneg %vs @@ -214,7 +214,7 @@ define @vfcopynsign_vf_nxv2f16( %vm, half %s) { ; CHECK-LABEL: vfcopynsign_vf_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %s, i32 0 @@ -229,7 +229,7 @@ define @vfcopysign_vv_nxv4f16( %vm, %vs) { ; CHECK-LABEL: vfcopysign_vv_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 ; CHECK-NEXT: ret %r = call @llvm.copysign.nxv4f16( %vm, %vs) @@ -239,7 +239,7 @@ define @vfcopysign_vf_nxv4f16( %vm, half %s) { ; CHECK-LABEL: vfcopysign_vf_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %s, i32 0 @@ -251,7 +251,7 @@ define @vfcopynsign_vv_nxv4f16( %vm, %vs) { ; CHECK-LABEL: vfcopynsign_vv_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 ; CHECK-NEXT: ret %n = fneg %vs @@ -262,7 +262,7 @@ define @vfcopynsign_vf_nxv4f16( %vm, half %s) { ; CHECK-LABEL: vfcopynsign_vf_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %s, i32 0 @@ -277,7 +277,7 @@ define @vfcopysign_vv_nxv8f16( %vm, %vs) { ; CHECK-LABEL: vfcopysign_vv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 ; CHECK-NEXT: ret %r = call @llvm.copysign.nxv8f16( %vm, %vs) @@ -287,7 +287,7 @@ define @vfcopysign_vf_nxv8f16( %vm, half %s) { ; CHECK-LABEL: vfcopysign_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %s, i32 0 @@ -299,7 +299,7 @@ define @vfcopynsign_vv_nxv8f16( %vm, %vs) { ; CHECK-LABEL: vfcopynsign_vv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 ; CHECK-NEXT: ret %n = fneg %vs @@ -310,7 +310,7 @@ define @vfcopynsign_vf_nxv8f16( %vm, half %s) { ; CHECK-LABEL: vfcopynsign_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %s, i32 0 @@ -323,7 +323,7 @@ define @vfcopysign_exttrunc_vv_nxv8f16_nxv8f32( %vm, %vs) { ; CHECK-LABEL: vfcopysign_exttrunc_vv_nxv8f16_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; 
CHECK-NEXT: vfncvt.f.f.w v10, v12 ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 ; CHECK-NEXT: ret @@ -335,9 +335,9 @@ define @vfcopysign_exttrunc_vf_nxv8f16_nxv8f32( %vm, float %s) { ; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv8f16_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfmv.v.f v12, fa0 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v10, v12 ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 ; CHECK-NEXT: ret @@ -351,7 +351,7 @@ define @vfcopynsign_exttrunc_vv_nxv8f16_nxv8f32( %vm, %vs) { ; CHECK-LABEL: vfcopynsign_exttrunc_vv_nxv8f16_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v10, v12 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 ; CHECK-NEXT: ret @@ -364,9 +364,9 @@ define @vfcopynsign_exttrunc_vf_nxv8f16_nxv8f32( %vm, float %s) { ; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv8f16_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfmv.v.f v12, fa0 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v10, v12 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 ; CHECK-NEXT: ret @@ -381,9 +381,9 @@ define @vfcopysign_exttrunc_vv_nxv8f16_nxv8f64( %vm, %vs) { ; CHECK-LABEL: vfcopysign_exttrunc_vv_nxv8f16_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfncvt.rod.f.f.w v12, v16 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v10, v12 ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 ; CHECK-NEXT: ret @@ -395,11 +395,11 @@ define @vfcopysign_exttrunc_vf_nxv8f16_nxv8f64( %vm, double %s) { ; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv8f16_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfmv.v.f v16, fa0 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-NEXT: vfncvt.rod.f.f.w v12, v16 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v10, v12 ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 ; CHECK-NEXT: ret @@ -413,9 +413,9 @@ define @vfcopynsign_exttrunc_vv_nxv8f16_nxv8f64( %vm, %vs) { ; CHECK-LABEL: vfcopynsign_exttrunc_vv_nxv8f16_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfncvt.rod.f.f.w v12, v16 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v10, v12 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 ; CHECK-NEXT: ret @@ -428,11 +428,11 @@ define @vfcopynsign_exttrunc_vf_nxv8f16_nxv8f64( %vm, double %s) { ; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv8f16_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfmv.v.f v16, fa0 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-NEXT: vfncvt.rod.f.f.w v12, v16 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w 
v10, v12 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 ; CHECK-NEXT: ret @@ -449,7 +449,7 @@ define @vfcopysign_vv_nxv16f16( %vm, %vs) { ; CHECK-LABEL: vfcopysign_vv_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v12 ; CHECK-NEXT: ret %r = call @llvm.copysign.nxv16f16( %vm, %vs) @@ -459,7 +459,7 @@ define @vfcopysign_vf_nxv16f16( %vm, half %s) { ; CHECK-LABEL: vfcopysign_vf_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %s, i32 0 @@ -471,7 +471,7 @@ define @vfcopynsign_vv_nxv16f16( %vm, %vs) { ; CHECK-LABEL: vfcopynsign_vv_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v12 ; CHECK-NEXT: ret %n = fneg %vs @@ -482,7 +482,7 @@ define @vfcopynsign_vf_nxv16f16( %vm, half %s) { ; CHECK-LABEL: vfcopynsign_vf_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %s, i32 0 @@ -497,7 +497,7 @@ define @vfcopysign_vv_nxv32f16( %vm, %vs) { ; CHECK-LABEL: vfcopysign_vv_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v16 ; CHECK-NEXT: ret %r = call @llvm.copysign.nxv32f16( %vm, %vs) @@ -507,7 +507,7 @@ define @vfcopysign_vf_nxv32f16( %vm, half %s) { ; CHECK-LABEL: vfcopysign_vf_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %s, i32 0 @@ -519,7 +519,7 @@ define @vfcopynsign_vv_nxv32f16( %vm, %vs) { ; CHECK-LABEL: vfcopynsign_vv_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v16 ; CHECK-NEXT: ret %n = fneg %vs @@ -530,7 +530,7 @@ define @vfcopynsign_vf_nxv32f16( %vm, half %s) { ; CHECK-LABEL: vfcopynsign_vf_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %s, i32 0 @@ -545,7 +545,7 @@ define @vfcopysign_vv_nxv1f32( %vm, %vs) { ; CHECK-LABEL: vfcopysign_vv_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 ; CHECK-NEXT: ret %r = call @llvm.copysign.nxv1f32( %vm, %vs) @@ -555,7 +555,7 @@ define @vfcopysign_vf_nxv1f32( %vm, float %s) { ; CHECK-LABEL: vfcopysign_vf_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %s, i32 0 @@ -567,7 +567,7 @@ define @vfcopynsign_vv_nxv1f32( %vm, %vs) { ; CHECK-LABEL: vfcopynsign_vv_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 ; CHECK-NEXT: ret %n = fneg %vs @@ -578,7 +578,7 @@ define @vfcopynsign_vf_nxv1f32( 
%vm, float %s) { ; CHECK-LABEL: vfcopynsign_vf_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %s, i32 0 @@ -591,9 +591,9 @@ define @vfcopysign_exttrunc_vv_nxv1f32_nxv1f16( %vm, %vs) { ; CHECK-LABEL: vfcopysign_exttrunc_vv_nxv1f32_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v10, v9 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 ; CHECK-NEXT: ret %e = fpext %vs to @@ -604,10 +604,10 @@ define @vfcopysign_exttrunc_vf_nxv1f32_nxv1f16( %vm, half %s) { ; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv1f32_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vfwcvt.f.f.v v10, v9 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, half %s, i32 0 @@ -620,9 +620,9 @@ define @vfcopynsign_exttrunc_vv_nxv1f32_nxv1f16( %vm, %vs) { ; CHECK-LABEL: vfcopynsign_exttrunc_vv_nxv1f32_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v10, v9 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 ; CHECK-NEXT: ret %n = fneg %vs @@ -634,10 +634,10 @@ define @vfcopynsign_exttrunc_vf_nxv1f32_nxv1f16( %vm, half %s) { ; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv1f32_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vfwcvt.f.f.v v10, v9 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, half %s, i32 0 @@ -651,7 +651,7 @@ define @vfcopysign_exttrunc_vv_nxv1f32_nxv1f64( %vm, %vs) { ; CHECK-LABEL: vfcopysign_exttrunc_vv_nxv1f32_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v10, v9 ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 ; CHECK-NEXT: ret @@ -663,9 +663,9 @@ define @vfcopysign_exttrunc_vf_nxv1f32_nxv1f64( %vm, double %s) { ; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv1f32_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfmv.v.f v9, fa0 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v10, v9 ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 ; CHECK-NEXT: ret @@ -679,7 +679,7 @@ define @vfcopynsign_exttrunc_vv_nxv1f32_nxv1f64( %vm, %vs) { ; CHECK-LABEL: vfcopynsign_exttrunc_vv_nxv1f32_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v10, v9 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 ; CHECK-NEXT: ret @@ -692,9 +692,9 @@ define @vfcopynsign_exttrunc_vf_nxv1f32_nxv1f64( %vm, double %s) { ; CHECK-LABEL: 
vfcopynsign_exttrunc_vf_nxv1f32_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfmv.v.f v9, fa0 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v10, v9 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 ; CHECK-NEXT: ret @@ -711,7 +711,7 @@ define @vfcopysign_vv_nxv2f32( %vm, %vs) { ; CHECK-LABEL: vfcopysign_vv_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 ; CHECK-NEXT: ret %r = call @llvm.copysign.nxv2f32( %vm, %vs) @@ -721,7 +721,7 @@ define @vfcopysign_vf_nxv2f32( %vm, float %s) { ; CHECK-LABEL: vfcopysign_vf_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %s, i32 0 @@ -733,7 +733,7 @@ define @vfcopynsign_vv_nxv2f32( %vm, %vs) { ; CHECK-LABEL: vfcopynsign_vv_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 ; CHECK-NEXT: ret %n = fneg %vs @@ -744,7 +744,7 @@ define @vfcopynsign_vf_nxv2f32( %vm, float %s) { ; CHECK-LABEL: vfcopynsign_vf_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %s, i32 0 @@ -759,7 +759,7 @@ define @vfcopysign_vv_nxv4f32( %vm, %vs) { ; CHECK-LABEL: vfcopysign_vv_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 ; CHECK-NEXT: ret %r = call @llvm.copysign.nxv4f32( %vm, %vs) @@ -769,7 +769,7 @@ define @vfcopysign_vf_nxv4f32( %vm, float %s) { ; CHECK-LABEL: vfcopysign_vf_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %s, i32 0 @@ -781,7 +781,7 @@ define @vfcopynsign_vv_nxv4f32( %vm, %vs) { ; CHECK-LABEL: vfcopynsign_vv_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 ; CHECK-NEXT: ret %n = fneg %vs @@ -792,7 +792,7 @@ define @vfcopynsign_vf_nxv4f32( %vm, float %s) { ; CHECK-LABEL: vfcopynsign_vf_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %s, i32 0 @@ -807,7 +807,7 @@ define @vfcopysign_vv_nxv8f32( %vm, %vs) { ; CHECK-LABEL: vfcopysign_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v12 ; CHECK-NEXT: ret %r = call @llvm.copysign.nxv8f32( %vm, %vs) @@ -817,7 +817,7 @@ define @vfcopysign_vf_nxv8f32( %vm, float %s) { ; CHECK-LABEL: vfcopysign_vf_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %s, i32 0 @@ -829,7 +829,7 @@ define 
@vfcopynsign_vv_nxv8f32( %vm, %vs) { ; CHECK-LABEL: vfcopynsign_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v12 ; CHECK-NEXT: ret %n = fneg %vs @@ -840,7 +840,7 @@ define @vfcopynsign_vf_nxv8f32( %vm, float %s) { ; CHECK-LABEL: vfcopynsign_vf_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %s, i32 0 @@ -853,9 +853,9 @@ define @vfcopysign_exttrunc_vv_nxv8f32_nxv8f16( %vm, %vs) { ; CHECK-LABEL: vfcopysign_exttrunc_vv_nxv8f32_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v16, v12 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v16 ; CHECK-NEXT: ret %e = fpext %vs to @@ -866,10 +866,10 @@ define @vfcopysign_exttrunc_vf_nxv8f32_nxv8f16( %vm, half %s) { ; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv8f32_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vfwcvt.f.f.v v16, v12 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, half %s, i32 0 @@ -882,9 +882,9 @@ define @vfcopynsign_exttrunc_vv_nxv8f32_nxv8f16( %vm, %vs) { ; CHECK-LABEL: vfcopynsign_exttrunc_vv_nxv8f32_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v16, v12 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v16 ; CHECK-NEXT: ret %n = fneg %vs @@ -896,10 +896,10 @@ define @vfcopynsign_exttrunc_vf_nxv8f32_nxv8f16( %vm, half %s) { ; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv8f32_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfmv.v.f v12, fa0 ; CHECK-NEXT: vfwcvt.f.f.v v16, v12 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, half %s, i32 0 @@ -913,7 +913,7 @@ define @vfcopysign_exttrunc_vv_nxv8f32_nxv8f64( %vm, %vs) { ; CHECK-LABEL: vfcopysign_exttrunc_vv_nxv8f32_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v12, v16 ; CHECK-NEXT: vfsgnj.vv v8, v8, v12 ; CHECK-NEXT: ret @@ -925,9 +925,9 @@ define @vfcopysign_exttrunc_vf_nxv8f32_nxv8f64( %vm, double %s) { ; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv8f32_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfmv.v.f v16, fa0 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v12, v16 ; CHECK-NEXT: vfsgnj.vv v8, v8, v12 ; CHECK-NEXT: ret @@ -941,7 +941,7 @@ define @vfcopynsign_exttrunc_vv_nxv8f32_nxv8f64( %vm, %vs) { ; CHECK-LABEL: vfcopynsign_exttrunc_vv_nxv8f32_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: 
vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v12, v16 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v12 ; CHECK-NEXT: ret @@ -954,9 +954,9 @@ define @vfcopynsign_exttrunc_vf_nxv8f32_nxv8f64( %vm, double %s) { ; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv8f32_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfmv.v.f v16, fa0 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v12, v16 ; CHECK-NEXT: vfsgnjn.vv v8, v8, v12 ; CHECK-NEXT: ret @@ -973,7 +973,7 @@ define @vfcopysign_vv_nxv16f32( %vm, %vs) { ; CHECK-LABEL: vfcopysign_vv_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v16 ; CHECK-NEXT: ret %r = call @llvm.copysign.nxv16f32( %vm, %vs) @@ -983,7 +983,7 @@ define @vfcopysign_vf_nxv16f32( %vm, float %s) { ; CHECK-LABEL: vfcopysign_vf_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %s, i32 0 @@ -995,7 +995,7 @@ define @vfcopynsign_vv_nxv16f32( %vm, %vs) { ; CHECK-LABEL: vfcopynsign_vv_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v16 ; CHECK-NEXT: ret %n = fneg %vs @@ -1006,7 +1006,7 @@ define @vfcopynsign_vf_nxv16f32( %vm, float %s) { ; CHECK-LABEL: vfcopynsign_vf_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %s, i32 0 @@ -1021,7 +1021,7 @@ define @vfcopysign_vv_nxv1f64( %vm, %vs) { ; CHECK-LABEL: vfcopysign_vv_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 ; CHECK-NEXT: ret %r = call @llvm.copysign.nxv1f64( %vm, %vs) @@ -1031,7 +1031,7 @@ define @vfcopysign_vf_nxv1f64( %vm, double %s) { ; CHECK-LABEL: vfcopysign_vf_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %s, i32 0 @@ -1043,7 +1043,7 @@ define @vfcopynsign_vv_nxv1f64( %vm, %vs) { ; CHECK-LABEL: vfcopynsign_vv_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 ; CHECK-NEXT: ret %n = fneg %vs @@ -1054,7 +1054,7 @@ define @vfcopynsign_vf_nxv1f64( %vm, double %s) { ; CHECK-LABEL: vfcopynsign_vf_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %s, i32 0 @@ -1067,11 +1067,11 @@ define @vfcopysign_exttrunc_vv_nxv1f64_nxv1f16( %vm, %vs) { ; CHECK-LABEL: vfcopysign_exttrunc_vv_nxv1f64_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v10, v9 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, 
zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v9, v10 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 ; CHECK-NEXT: ret %e = fpext %vs to @@ -1082,12 +1082,12 @@ define @vfcopysign_exttrunc_vf_nxv1f64_nxv1f16( %vm, half %s) { ; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv1f64_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vfwcvt.f.f.v v10, v9 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v9, v10 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, half %s, i32 0 @@ -1100,11 +1100,11 @@ define @vfcopynsign_exttrunc_vv_nxv1f64_nxv1f16( %vm, %vs) { ; CHECK-LABEL: vfcopynsign_exttrunc_vv_nxv1f64_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v10, v9 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v9, v10 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 ; CHECK-NEXT: ret %n = fneg %vs @@ -1116,12 +1116,12 @@ define @vfcopynsign_exttrunc_vf_nxv1f64_nxv1f16( %vm, half %s) { ; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv1f64_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vfwcvt.f.f.v v10, v9 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v9, v10 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, half %s, i32 0 @@ -1135,9 +1135,9 @@ define @vfcopysign_exttrunc_vv_nxv1f64_nxv1f32( %vm, %vs) { ; CHECK-LABEL: vfcopysign_exttrunc_vv_nxv1f64_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v10, v9 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 ; CHECK-NEXT: ret %e = fpext %vs to @@ -1148,10 +1148,10 @@ define @vfcopysign_exttrunc_vf_nxv1f64_nxv1f32( %vm, float %s) { ; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv1f64_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vfwcvt.f.f.v v10, v9 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, float %s, i32 0 @@ -1164,9 +1164,9 @@ define @vfcopynsign_exttrunc_vv_nxv1f64_nxv1f32( %vm, %vs) { ; CHECK-LABEL: vfcopynsign_exttrunc_vv_nxv1f64_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v10, v9 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, 
ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 ; CHECK-NEXT: ret %n = fneg %vs @@ -1178,10 +1178,10 @@ define @vfcopynsign_exttrunc_vf_nxv1f64_nxv1f32( %vm, float %s) { ; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv1f64_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfmv.v.f v9, fa0 ; CHECK-NEXT: vfwcvt.f.f.v v10, v9 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, float %s, i32 0 @@ -1197,7 +1197,7 @@ define @vfcopysign_vv_nxv2f64( %vm, %vs) { ; CHECK-LABEL: vfcopysign_vv_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 ; CHECK-NEXT: ret %r = call @llvm.copysign.nxv2f64( %vm, %vs) @@ -1207,7 +1207,7 @@ define @vfcopysign_vf_nxv2f64( %vm, double %s) { ; CHECK-LABEL: vfcopysign_vf_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %s, i32 0 @@ -1219,7 +1219,7 @@ define @vfcopynsign_vv_nxv2f64( %vm, %vs) { ; CHECK-LABEL: vfcopynsign_vv_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 ; CHECK-NEXT: ret %n = fneg %vs @@ -1230,7 +1230,7 @@ define @vfcopynsign_vf_nxv2f64( %vm, double %s) { ; CHECK-LABEL: vfcopynsign_vf_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %s, i32 0 @@ -1245,7 +1245,7 @@ define @vfcopysign_vv_nxv4f64( %vm, %vs) { ; CHECK-LABEL: vfcopysign_vv_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v12 ; CHECK-NEXT: ret %r = call @llvm.copysign.nxv4f64( %vm, %vs) @@ -1255,7 +1255,7 @@ define @vfcopysign_vf_nxv4f64( %vm, double %s) { ; CHECK-LABEL: vfcopysign_vf_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %s, i32 0 @@ -1267,7 +1267,7 @@ define @vfcopynsign_vv_nxv4f64( %vm, %vs) { ; CHECK-LABEL: vfcopynsign_vv_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v12 ; CHECK-NEXT: ret %n = fneg %vs @@ -1278,7 +1278,7 @@ define @vfcopynsign_vf_nxv4f64( %vm, double %s) { ; CHECK-LABEL: vfcopynsign_vf_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %s, i32 0 @@ -1293,7 +1293,7 @@ define @vfcopysign_vv_nxv8f64( %vm, %vs) { ; CHECK-LABEL: vfcopysign_vv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v16 ; CHECK-NEXT: ret %r = call @llvm.copysign.nxv8f64( %vm, %vs) @@ -1303,7 +1303,7 @@ define @vfcopysign_vf_nxv8f64( %vm, double %s) { ; CHECK-LABEL: vfcopysign_vf_nxv8f64: 
; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %s, i32 0 @@ -1315,7 +1315,7 @@ define @vfcopynsign_vv_nxv8f64( %vm, %vs) { ; CHECK-LABEL: vfcopynsign_vv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v16 ; CHECK-NEXT: ret %n = fneg %vs @@ -1326,7 +1326,7 @@ define @vfcopynsign_vf_nxv8f64( %vm, double %s) { ; CHECK-LABEL: vfcopynsign_vf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %s, i32 0 @@ -1339,11 +1339,11 @@ define @vfcopysign_exttrunc_vv_nxv8f64_nxv8f16( %vm, %vs) { ; CHECK-LABEL: vfcopysign_exttrunc_vv_nxv8f64_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v20, v16 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v24, v20 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v24 ; CHECK-NEXT: ret %e = fpext %vs to @@ -1354,12 +1354,12 @@ define @vfcopysign_exttrunc_vf_nxv8f64_nxv8f16( %vm, half %s) { ; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv8f64_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vfwcvt.f.f.v v20, v16 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v24, v20 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v24 ; CHECK-NEXT: ret %head = insertelement poison, half %s, i32 0 @@ -1372,11 +1372,11 @@ define @vfcopynsign_exttrunc_vv_nxv8f64_nxv8f16( %vm, %vs) { ; CHECK-LABEL: vfcopynsign_exttrunc_vv_nxv8f64_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v20, v16 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v24, v20 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v24 ; CHECK-NEXT: ret %n = fneg %vs @@ -1388,12 +1388,12 @@ define @vfcopynsign_exttrunc_vf_nxv8f64_nxv8f16( %vm, half %s) { ; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv8f64_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vfwcvt.f.f.v v20, v16 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v24, v20 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v24 ; CHECK-NEXT: ret %head = insertelement poison, half %s, i32 0 @@ -1407,9 +1407,9 @@ define @vfcopysign_exttrunc_vv_nxv8f64_nxv8f32( %vm, %vs) { ; CHECK-LABEL: vfcopysign_exttrunc_vv_nxv8f64_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli 
a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v24, v16 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v24 ; CHECK-NEXT: ret %e = fpext %vs to @@ -1420,10 +1420,10 @@ define @vfcopysign_exttrunc_vf_nxv8f64_nxv8f32( %vm, float %s) { ; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv8f64_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vfwcvt.f.f.v v24, v16 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v24 ; CHECK-NEXT: ret %head = insertelement poison, float %s, i32 0 @@ -1436,9 +1436,9 @@ define @vfcopynsign_exttrunc_vv_nxv8f64_nxv8f32( %vm, %vs) { ; CHECK-LABEL: vfcopynsign_exttrunc_vv_nxv8f64_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v24, v16 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v24 ; CHECK-NEXT: ret %n = fneg %vs @@ -1450,10 +1450,10 @@ define @vfcopynsign_exttrunc_vf_nxv8f64_nxv8f32( %vm, float %s) { ; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv8f64_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vfwcvt.f.f.v v24, v16 -; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v24 ; CHECK-NEXT: ret %head = insertelement poison, float %s, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x.ll @@ -11,7 +11,7 @@ define @intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret entry: @@ -54,7 +54,7 @@ define @intrinsic_vfcvt_f.x.v_nxv2f16_nxv2i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret entry: @@ -97,7 +97,7 @@ define @intrinsic_vfcvt_f.x.v_nxv4f16_nxv4i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret entry: @@ -140,7 +140,7 @@ define @intrinsic_vfcvt_f.x.v_nxv8f16_nxv8i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret entry: @@ -183,7 +183,7 @@ define @intrinsic_vfcvt_f.x.v_nxv16f16_nxv16i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; 
CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret entry: @@ -226,7 +226,7 @@ define @intrinsic_vfcvt_f.x.v_nxv32f16_nxv32i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ define @intrinsic_vfcvt_f.x.v_nxv1f32_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret entry: @@ -312,7 +312,7 @@ define @intrinsic_vfcvt_f.x.v_nxv2f32_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret entry: @@ -355,7 +355,7 @@ define @intrinsic_vfcvt_f.x.v_nxv4f32_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret entry: @@ -398,7 +398,7 @@ define @intrinsic_vfcvt_f.x.v_nxv8f32_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret entry: @@ -441,7 +441,7 @@ define @intrinsic_vfcvt_f.x.v_nxv16f32_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret entry: @@ -484,7 +484,7 @@ define @intrinsic_vfcvt_f.x.v_nxv1f64_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret entry: @@ -527,7 +527,7 @@ define @intrinsic_vfcvt_f.x.v_nxv2f64_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret entry: @@ -570,7 +570,7 @@ define @intrinsic_vfcvt_f.x.v_nxv4f64_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret entry: @@ -613,7 +613,7 @@ define @intrinsic_vfcvt_f.x.v_nxv8f64_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu.ll @@ -11,7 +11,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret entry: @@ -54,7 +54,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv2f16_nxv2i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret entry: @@ -97,7 +97,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv4f16_nxv4i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret entry: @@ -140,7 +140,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv8f16_nxv8i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret entry: @@ -183,7 +183,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv16f16_nxv16i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret entry: @@ -226,7 +226,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv32f16_nxv32i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv1f32_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret entry: @@ -312,7 +312,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv2f32_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret entry: @@ -355,7 +355,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv4f32_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret entry: @@ -398,7 +398,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv8f32_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret entry: @@ -441,7 +441,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv16f32_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret entry: @@ -484,7 +484,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv1f64_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret entry: @@ -527,7 +527,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv2f64_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret entry: @@ -570,7 +570,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv4f64_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret entry: @@ -613,7 +613,7 @@ define @intrinsic_vfcvt_f.xu.v_nxv8f64_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f.ll @@ -11,7 +11,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -54,7 +54,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv2i16_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -97,7 +97,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv4i16_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -140,7 +140,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv8i16_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -183,7 +183,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv16i16_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -226,7 +226,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv32i16_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv32i16_nxv32f16: 
; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv1i32_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -312,7 +312,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv2i32_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -355,7 +355,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv4i32_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -398,7 +398,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv8i32_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -441,7 +441,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv16i32_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -484,7 +484,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv1i64_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -527,7 +527,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv2i64_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -570,7 +570,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv4i64_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -613,7 +613,7 @@ define @intrinsic_vfcvt_rtz.x.f.v_nxv8i64_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f.ll @@ -11,7 +11,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16( %0, iXLen 
%1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -54,7 +54,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv2i16_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -97,7 +97,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv4i16_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -140,7 +140,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv8i16_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -183,7 +183,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv16i16_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -226,7 +226,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv32i16_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv1i32_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -312,7 +312,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv2i32_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -355,7 +355,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv4i32_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -398,7 +398,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv8i32_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -441,7 +441,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv16i32_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -484,7 +484,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv1i64_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -527,7 +527,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv2i64_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -570,7 +570,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv4i64_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -613,7 +613,7 @@ define @intrinsic_vfcvt_rtz.xu.f.v_nxv8i64_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f.ll @@ -11,7 +11,7 @@ define @intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfcvt.x.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -54,7 +54,7 @@ define @intrinsic_vfcvt_x.f.v_nxv2i16_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfcvt.x.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -97,7 +97,7 @@ define @intrinsic_vfcvt_x.f.v_nxv4i16_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfcvt.x.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -140,7 +140,7 @@ define @intrinsic_vfcvt_x.f.v_nxv8i16_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfcvt.x.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -183,7 +183,7 @@ define @intrinsic_vfcvt_x.f.v_nxv16i16_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfcvt.x.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -226,7 +226,7 @@ define @intrinsic_vfcvt_x.f.v_nxv32i16_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfcvt.x.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ define @intrinsic_vfcvt_x.f.v_nxv1i32_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfcvt.x.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -312,7 +312,7 @@ define @intrinsic_vfcvt_x.f.v_nxv2i32_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfcvt.x.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -355,7 +355,7 @@ define @intrinsic_vfcvt_x.f.v_nxv4i32_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfcvt.x.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -398,7 +398,7 @@ define @intrinsic_vfcvt_x.f.v_nxv8i32_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfcvt.x.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -441,7 +441,7 @@ define @intrinsic_vfcvt_x.f.v_nxv16i32_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfcvt.x.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -484,7 +484,7 @@ define @intrinsic_vfcvt_x.f.v_nxv1i64_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfcvt.x.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -527,7 +527,7 @@ define @intrinsic_vfcvt_x.f.v_nxv2i64_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfcvt.x.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -570,7 +570,7 @@ define @intrinsic_vfcvt_x.f.v_nxv4i64_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfcvt.x.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -613,7 +613,7 @@ define @intrinsic_vfcvt_x.f.v_nxv8i64_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfcvt.x.f.v v8, v8 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f.ll @@ -11,7 +11,7 @@ define @intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: 
vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfcvt.xu.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -54,7 +54,7 @@ define @intrinsic_vfcvt_xu.f.v_nxv2i16_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv2i16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfcvt.xu.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -97,7 +97,7 @@ define @intrinsic_vfcvt_xu.f.v_nxv4i16_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv4i16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfcvt.xu.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -140,7 +140,7 @@ define @intrinsic_vfcvt_xu.f.v_nxv8i16_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv8i16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfcvt.xu.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -183,7 +183,7 @@ define @intrinsic_vfcvt_xu.f.v_nxv16i16_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv16i16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfcvt.xu.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -226,7 +226,7 @@ define @intrinsic_vfcvt_xu.f.v_nxv32i16_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv32i16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfcvt.xu.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ define @intrinsic_vfcvt_xu.f.v_nxv1i32_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfcvt.xu.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -312,7 +312,7 @@ define @intrinsic_vfcvt_xu.f.v_nxv2i32_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv2i32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfcvt.xu.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -355,7 +355,7 @@ define @intrinsic_vfcvt_xu.f.v_nxv4i32_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv4i32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfcvt.xu.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -398,7 +398,7 @@ define @intrinsic_vfcvt_xu.f.v_nxv8i32_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv8i32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfcvt.xu.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -441,7 +441,7 @@ define @intrinsic_vfcvt_xu.f.v_nxv16i32_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv16i32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfcvt.xu.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -484,7 +484,7 @@ define @intrinsic_vfcvt_xu.f.v_nxv1i64_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: 
intrinsic_vfcvt_xu.f.v_nxv1i64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfcvt.xu.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -527,7 +527,7 @@ define @intrinsic_vfcvt_xu.f.v_nxv2i64_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv2i64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfcvt.xu.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -570,7 +570,7 @@ define @intrinsic_vfcvt_xu.f.v_nxv4i64_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv4i64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfcvt.xu.f.v v8, v8 ; CHECK-NEXT: ret entry: @@ -613,7 +613,7 @@ define @intrinsic_vfcvt_xu.f.v_nxv8i64_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv8i64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfcvt.xu.f.v v8, v8 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-sdnode.ll @@ -7,7 +7,7 @@ define @vfdiv_vv_nxv1f16( %va, %vb) { ; CHECK-LABEL: vfdiv_vv_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = fdiv %va, %vb @@ -17,7 +17,7 @@ define @vfdiv_vf_nxv1f16( %va, half %b) { ; CHECK-LABEL: vfdiv_vf_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -29,7 +29,7 @@ define @vfdiv_vv_nxv2f16( %va, %vb) { ; CHECK-LABEL: vfdiv_vv_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = fdiv %va, %vb @@ -39,7 +39,7 @@ define @vfdiv_vf_nxv2f16( %va, half %b) { ; CHECK-LABEL: vfdiv_vf_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -51,7 +51,7 @@ define @vfdiv_vv_nxv4f16( %va, %vb) { ; CHECK-LABEL: vfdiv_vv_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = fdiv %va, %vb @@ -61,7 +61,7 @@ define @vfdiv_vf_nxv4f16( %va, half %b) { ; CHECK-LABEL: vfdiv_vf_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -73,7 +73,7 @@ define @vfdiv_vv_nxv8f16( %va, %vb) { ; CHECK-LABEL: vfdiv_vv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = fdiv %va, %vb @@ -83,7 +83,7 @@ define @vfdiv_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: vfdiv_vf_nxv8f16: ; 
CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -95,7 +95,7 @@ define @vfdiv_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: vfdiv_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -107,7 +107,7 @@ define @vfdiv_vv_nxv16f16( %va, %vb) { ; CHECK-LABEL: vfdiv_vv_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = fdiv %va, %vb @@ -117,7 +117,7 @@ define @vfdiv_vf_nxv16f16( %va, half %b) { ; CHECK-LABEL: vfdiv_vf_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -129,7 +129,7 @@ define @vfdiv_vv_nxv32f16( %va, %vb) { ; CHECK-LABEL: vfdiv_vv_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = fdiv %va, %vb @@ -139,7 +139,7 @@ define @vfdiv_vf_nxv32f16( %va, half %b) { ; CHECK-LABEL: vfdiv_vf_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -151,7 +151,7 @@ define @vfdiv_vv_nxv1f32( %va, %vb) { ; CHECK-LABEL: vfdiv_vv_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = fdiv %va, %vb @@ -161,7 +161,7 @@ define @vfdiv_vf_nxv1f32( %va, float %b) { ; CHECK-LABEL: vfdiv_vf_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -173,7 +173,7 @@ define @vfdiv_vv_nxv2f32( %va, %vb) { ; CHECK-LABEL: vfdiv_vv_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = fdiv %va, %vb @@ -183,7 +183,7 @@ define @vfdiv_vf_nxv2f32( %va, float %b) { ; CHECK-LABEL: vfdiv_vf_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -195,7 +195,7 @@ define @vfdiv_vv_nxv4f32( %va, %vb) { ; CHECK-LABEL: vfdiv_vv_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = fdiv %va, %vb @@ -205,7 +205,7 @@ define @vfdiv_vf_nxv4f32( %va, float %b) { ; CHECK-LABEL: vfdiv_vf_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -217,7 +217,7 @@ define @vfdiv_vv_nxv8f32( %va, %vb) { ; CHECK-LABEL: 
vfdiv_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = fdiv %va, %vb @@ -227,7 +227,7 @@ define @vfdiv_vf_nxv8f32( %va, float %b) { ; CHECK-LABEL: vfdiv_vf_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -239,7 +239,7 @@ define @vfdiv_fv_nxv8f32( %va, float %b) { ; CHECK-LABEL: vfdiv_fv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -251,7 +251,7 @@ define @vfdiv_vv_nxv16f32( %va, %vb) { ; CHECK-LABEL: vfdiv_vv_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = fdiv %va, %vb @@ -261,7 +261,7 @@ define @vfdiv_vf_nxv16f32( %va, float %b) { ; CHECK-LABEL: vfdiv_vf_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -273,7 +273,7 @@ define @vfdiv_vv_nxv1f64( %va, %vb) { ; CHECK-LABEL: vfdiv_vv_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = fdiv %va, %vb @@ -283,7 +283,7 @@ define @vfdiv_vf_nxv1f64( %va, double %b) { ; CHECK-LABEL: vfdiv_vf_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -295,7 +295,7 @@ define @vfdiv_vv_nxv2f64( %va, %vb) { ; CHECK-LABEL: vfdiv_vv_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = fdiv %va, %vb @@ -305,7 +305,7 @@ define @vfdiv_vf_nxv2f64( %va, double %b) { ; CHECK-LABEL: vfdiv_vf_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -317,7 +317,7 @@ define @vfdiv_vv_nxv4f64( %va, %vb) { ; CHECK-LABEL: vfdiv_vv_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = fdiv %va, %vb @@ -327,7 +327,7 @@ define @vfdiv_vf_nxv4f64( %va, double %b) { ; CHECK-LABEL: vfdiv_vf_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -339,7 +339,7 @@ define @vfdiv_vv_nxv8f64( %va, %vb) { ; CHECK-LABEL: vfdiv_vv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = fdiv %va, %vb @@ -349,7 +349,7 @@ define @vfdiv_vf_nxv8f64( %va, double %b) { ; 
CHECK-LABEL: vfdiv_vf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -361,7 +361,7 @@ define @vfdiv_fv_nxv8f64( %va, double %b) { ; CHECK-LABEL: vfdiv_fv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll @@ -19,7 +19,7 @@ define @vfdiv_vv_nxv1f16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -43,7 +43,7 @@ define @vfdiv_vf_nxv1f16_unmasked( %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vf_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -69,7 +69,7 @@ define @vfdiv_vv_nxv2f16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -93,7 +93,7 @@ define @vfdiv_vf_nxv2f16_unmasked( %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vf_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -119,7 +119,7 @@ define @vfdiv_vv_nxv4f16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -143,7 +143,7 @@ define @vfdiv_vf_nxv4f16_unmasked( %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vf_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -169,7 +169,7 @@ define @vfdiv_vv_nxv8f16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -193,7 +193,7 @@ define @vfdiv_vf_nxv8f16_unmasked( %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vf_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -219,7 +219,7 @@ define 
@vfdiv_vv_nxv16f16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -243,7 +243,7 @@ define @vfdiv_vf_nxv16f16_unmasked( %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vf_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -269,7 +269,7 @@ define @vfdiv_vv_nxv32f16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv32f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -293,7 +293,7 @@ define @vfdiv_vf_nxv32f16_unmasked( %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vf_nxv32f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -319,7 +319,7 @@ define @vfdiv_vv_nxv1f32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -343,7 +343,7 @@ define @vfdiv_vf_nxv1f32_unmasked( %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vf_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -369,7 +369,7 @@ define @vfdiv_vv_nxv2f32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -393,7 +393,7 @@ define @vfdiv_vf_nxv2f32_unmasked( %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vf_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -419,7 +419,7 @@ define @vfdiv_vv_nxv4f32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -443,7 +443,7 @@ define @vfdiv_vf_nxv4f32_unmasked( %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vf_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -469,7 +469,7 @@ define @vfdiv_vv_nxv8f32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: 
vfdiv_vv_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -493,7 +493,7 @@ define @vfdiv_vf_nxv8f32_unmasked( %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vf_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -519,7 +519,7 @@ define @vfdiv_vv_nxv16f32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -543,7 +543,7 @@ define @vfdiv_vf_nxv16f32_unmasked( %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vf_nxv16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -569,7 +569,7 @@ define @vfdiv_vv_nxv1f64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -593,7 +593,7 @@ define @vfdiv_vf_nxv1f64_unmasked( %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vf_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -619,7 +619,7 @@ define @vfdiv_vv_nxv2f64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -643,7 +643,7 @@ define @vfdiv_vf_nxv2f64_unmasked( %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vf_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -669,7 +669,7 @@ define @vfdiv_vv_nxv4f64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -693,7 +693,7 @@ define @vfdiv_vf_nxv4f64_unmasked( %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vf_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -731,7 +731,7 @@ define @vfdiv_vv_nxv8f64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vv_nxv8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, 
mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -755,7 +755,7 @@ define @vfdiv_vf_nxv8f64_unmasked( %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfdiv_vf_nxv8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfdiv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv.ll @@ -12,7 +12,7 @@ define @intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -59,7 +59,7 @@ define @intrinsic_vfdiv_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -106,7 +106,7 @@ define @intrinsic_vfdiv_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -153,7 +153,7 @@ define @intrinsic_vfdiv_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -200,7 +200,7 @@ define @intrinsic_vfdiv_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -247,7 +247,7 @@ define @intrinsic_vfdiv_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -295,7 +295,7 @@ define @intrinsic_vfdiv_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -342,7 +342,7 @@ define @intrinsic_vfdiv_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -389,7 +389,7 @@ define @intrinsic_vfdiv_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vfdiv_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -436,7 +436,7 @@ define @intrinsic_vfdiv_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -483,7 +483,7 @@ define @intrinsic_vfdiv_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -531,7 +531,7 @@ define @intrinsic_vfdiv_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -578,7 +578,7 @@ define @intrinsic_vfdiv_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -625,7 +625,7 @@ define @intrinsic_vfdiv_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -672,7 +672,7 @@ define @intrinsic_vfdiv_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfdiv.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -720,7 +720,7 @@ define @intrinsic_vfdiv_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -767,7 +767,7 @@ define @intrinsic_vfdiv_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -814,7 +814,7 @@ define @intrinsic_vfdiv_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -861,7 +861,7 @@ define @intrinsic_vfdiv_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -908,7 +908,7 @@ define @intrinsic_vfdiv_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -955,7 +955,7 @@ define @intrinsic_vfdiv_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1002,7 +1002,7 @@ define @intrinsic_vfdiv_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1049,7 +1049,7 @@ define @intrinsic_vfdiv_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1096,7 +1096,7 @@ define @intrinsic_vfdiv_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1143,7 +1143,7 @@ define @intrinsic_vfdiv_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1190,7 +1190,7 @@ define @intrinsic_vfdiv_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1237,7 +1237,7 @@ define @intrinsic_vfdiv_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1284,7 +1284,7 @@ define @intrinsic_vfdiv_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1331,7 +1331,7 @@ define @intrinsic_vfdiv_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: 
vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1378,7 +1378,7 @@ define @intrinsic_vfdiv_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfirst.ll b/llvm/test/CodeGen/RISCV/rvv/vfirst.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfirst.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfirst.ll @@ -10,7 +10,7 @@ define iXLen @intrinsic_vfirst_m_nxv1i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfirst_m_nxv1i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vfirst.m a0, v0 ; CHECK-NEXT: ret entry: @@ -43,7 +43,7 @@ ; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv1i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vfirst.m a0, v9, v0.t ; CHECK-NEXT: ret @@ -77,7 +77,7 @@ define iXLen @intrinsic_vfirst_m_nxv2i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfirst_m_nxv2i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vfirst.m a0, v0 ; CHECK-NEXT: ret entry: @@ -97,7 +97,7 @@ ; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv2i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vfirst.m a0, v9, v0.t ; CHECK-NEXT: ret @@ -117,7 +117,7 @@ define iXLen @intrinsic_vfirst_m_nxv4i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfirst_m_nxv4i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vfirst.m a0, v0 ; CHECK-NEXT: ret entry: @@ -137,7 +137,7 @@ ; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv4i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vfirst.m a0, v9, v0.t ; CHECK-NEXT: ret @@ -157,7 +157,7 @@ define iXLen @intrinsic_vfirst_m_nxv8i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfirst_m_nxv8i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vfirst.m a0, v0 ; CHECK-NEXT: ret entry: @@ -177,7 +177,7 @@ ; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv8i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vfirst.m a0, v9, v0.t ; CHECK-NEXT: ret @@ -197,7 +197,7 @@ define iXLen @intrinsic_vfirst_m_nxv16i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfirst_m_nxv16i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vfirst.m a0, v0 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ ; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv16i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: 
vmv1r.v v0, v8 ; CHECK-NEXT: vfirst.m a0, v9, v0.t ; CHECK-NEXT: ret @@ -237,7 +237,7 @@ define iXLen @intrinsic_vfirst_m_nxv32i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfirst_m_nxv32i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vfirst.m a0, v0 ; CHECK-NEXT: ret entry: @@ -257,7 +257,7 @@ ; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv32i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vfirst.m a0, v9, v0.t ; CHECK-NEXT: ret @@ -277,7 +277,7 @@ define iXLen @intrinsic_vfirst_m_nxv64i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfirst_m_nxv64i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vfirst.m a0, v0 ; CHECK-NEXT: ret entry: @@ -297,7 +297,7 @@ ; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv64i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vfirst.m a0, v9, v0.t ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll @@ -20,7 +20,7 @@ define @vfma_vv_nxv1f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -56,7 +56,7 @@ define @vfma_vf_nxv1f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -70,7 +70,7 @@ define @vfma_vf_nxv1f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv1f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -97,7 +97,7 @@ define @vfma_vv_nxv2f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -133,7 +133,7 @@ define @vfma_vf_nxv2f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -147,7 +147,7 @@ define @vfma_vf_nxv2f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv2f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: 
ret %elt.head = insertelement poison, half %b, i32 0 @@ -174,7 +174,7 @@ define @vfma_vv_nxv4f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -210,7 +210,7 @@ define @vfma_vf_nxv4f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -224,7 +224,7 @@ define @vfma_vf_nxv4f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv4f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -251,7 +251,7 @@ define @vfma_vv_nxv8f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -287,7 +287,7 @@ define @vfma_vf_nxv8f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -301,7 +301,7 @@ define @vfma_vf_nxv8f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv8f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -328,7 +328,7 @@ define @vfma_vv_nxv16f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -364,7 +364,7 @@ define @vfma_vf_nxv16f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -378,7 +378,7 @@ define @vfma_vf_nxv16f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv16f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -407,7 +407,7 @@ ; CHECK-LABEL: vfma_vv_nxv32f16_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement 
poison, i1 true, i32 0 @@ -443,7 +443,7 @@ define @vfma_vf_nxv32f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv32f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -457,7 +457,7 @@ define @vfma_vf_nxv32f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv32f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -484,7 +484,7 @@ define @vfma_vv_nxv1f32_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -520,7 +520,7 @@ define @vfma_vf_nxv1f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -534,7 +534,7 @@ define @vfma_vf_nxv1f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv1f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -561,7 +561,7 @@ define @vfma_vv_nxv2f32_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -597,7 +597,7 @@ define @vfma_vf_nxv2f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -611,7 +611,7 @@ define @vfma_vf_nxv2f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv2f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -638,7 +638,7 @@ define @vfma_vv_nxv4f32_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -674,7 +674,7 @@ define @vfma_vf_nxv4f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head 
= insertelement poison, float %b, i32 0 @@ -688,7 +688,7 @@ define @vfma_vf_nxv4f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv4f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -715,7 +715,7 @@ define @vfma_vv_nxv8f32_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -751,7 +751,7 @@ define @vfma_vf_nxv8f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -765,7 +765,7 @@ define @vfma_vf_nxv8f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv8f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -794,7 +794,7 @@ ; CHECK-LABEL: vfma_vv_nxv16f32_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -830,7 +830,7 @@ define @vfma_vf_nxv16f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -844,7 +844,7 @@ define @vfma_vf_nxv16f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv16f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -871,7 +871,7 @@ define @vfma_vv_nxv1f64_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -907,7 +907,7 @@ define @vfma_vf_nxv1f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -921,7 +921,7 @@ define @vfma_vf_nxv1f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv1f64_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret 
%elt.head = insertelement poison, double %b, i32 0 @@ -948,7 +948,7 @@ define @vfma_vv_nxv2f64_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -984,7 +984,7 @@ define @vfma_vf_nxv2f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -998,7 +998,7 @@ define @vfma_vf_nxv2f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv2f64_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -1025,7 +1025,7 @@ define @vfma_vv_nxv4f64_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1061,7 +1061,7 @@ define @vfma_vf_nxv4f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -1075,7 +1075,7 @@ define @vfma_vf_nxv4f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv4f64_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -1104,7 +1104,7 @@ ; CHECK-LABEL: vfma_vv_nxv7f64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1131,7 +1131,7 @@ ; CHECK-LABEL: vfma_vv_nxv8f64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1167,7 +1167,7 @@ define @vfma_vf_nxv8f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -1181,7 +1181,7 @@ define @vfma_vf_nxv8f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv8f64_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, 
double %b, i32 0 @@ -1217,7 +1217,7 @@ ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: li a3, 0 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: vsetvli a5, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a5, zero, e8, mf4, ta, ma ; CHECK-NEXT: slli a5, a1, 3 ; CHECK-NEXT: add a6, a2, a5 ; CHECK-NEXT: vl8re64.v v8, (a6) @@ -1345,7 +1345,7 @@ ; CHECK-NEXT: addi a2, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill ; CHECK-NEXT: vl8re64.v v0, (a0) -; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 @@ -1356,7 +1356,7 @@ ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: mv a4, a1 ; CHECK-NEXT: .LBB93_4: -; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add a0, sp, a0 @@ -1396,7 +1396,7 @@ define @vfmsub_vv_nxv1f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1435,7 +1435,7 @@ define @vfmsub_vf_nxv1f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -1450,7 +1450,7 @@ define @vfmsub_vf_nxv1f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv1f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -1490,7 +1490,7 @@ define @vfnmadd_vv_nxv1f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1504,7 +1504,7 @@ define @vfnmadd_vv_nxv1f16_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv1f16_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1546,7 +1546,7 @@ define @vfnmadd_vf_nxv1f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -1562,7 +1562,7 @@ define @vfnmadd_vf_nxv1f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv1f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -1606,7 +1606,7 @@ define 
@vfnmadd_vf_nxv1f16_neg_splat_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv1f16_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -1622,7 +1622,7 @@ define @vfnmadd_vf_nxv1f16_neg_splat_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv1f16_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -1663,7 +1663,7 @@ define @vfnmsub_vv_nxv1f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1677,7 +1677,7 @@ define @vfnmsub_vv_nxv1f16_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv1f16_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1717,7 +1717,7 @@ define @vfnmsub_vf_nxv1f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -1732,7 +1732,7 @@ define @vfnmsub_vf_nxv1f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv1f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -1773,7 +1773,7 @@ define @vfnmsub_vf_nxv1f16_neg_splat_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv1f16_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -1788,7 +1788,7 @@ define @vfnmsub_vf_nxv1f16_neg_splat_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv1f16_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -1817,7 +1817,7 @@ define @vfmsub_vv_nxv2f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1856,7 +1856,7 @@ define @vfmsub_vf_nxv2f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, 
e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -1871,7 +1871,7 @@ define @vfmsub_vf_nxv2f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv2f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -1911,7 +1911,7 @@ define @vfnmadd_vv_nxv2f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1925,7 +1925,7 @@ define @vfnmadd_vv_nxv2f16_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv2f16_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1967,7 +1967,7 @@ define @vfnmadd_vf_nxv2f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -1983,7 +1983,7 @@ define @vfnmadd_vf_nxv2f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv2f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2027,7 +2027,7 @@ define @vfnmadd_vf_nxv2f16_neg_splat_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv2f16_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2043,7 +2043,7 @@ define @vfnmadd_vf_nxv2f16_neg_splat_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv2f16_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2084,7 +2084,7 @@ define @vfnmsub_vv_nxv2f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -2098,7 +2098,7 @@ define @vfnmsub_vv_nxv2f16_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv2f16_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -2138,7 +2138,7 @@ 
define @vfnmsub_vf_nxv2f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2153,7 +2153,7 @@ define @vfnmsub_vf_nxv2f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv2f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2194,7 +2194,7 @@ define @vfnmsub_vf_nxv2f16_neg_splat_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv2f16_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2209,7 +2209,7 @@ define @vfnmsub_vf_nxv2f16_neg_splat_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv2f16_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2238,7 +2238,7 @@ define @vfmsub_vv_nxv4f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -2277,7 +2277,7 @@ define @vfmsub_vf_nxv4f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2292,7 +2292,7 @@ define @vfmsub_vf_nxv4f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv4f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2332,7 +2332,7 @@ define @vfnmadd_vv_nxv4f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -2346,7 +2346,7 @@ define @vfnmadd_vv_nxv4f16_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv4f16_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -2388,7 +2388,7 @@ define @vfnmadd_vf_nxv4f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, 
e16, m1, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2404,7 +2404,7 @@ define @vfnmadd_vf_nxv4f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv4f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2448,7 +2448,7 @@ define @vfnmadd_vf_nxv4f16_neg_splat_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv4f16_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2464,7 +2464,7 @@ define @vfnmadd_vf_nxv4f16_neg_splat_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv4f16_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2505,7 +2505,7 @@ define @vfnmsub_vv_nxv4f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -2519,7 +2519,7 @@ define @vfnmsub_vv_nxv4f16_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv4f16_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -2559,7 +2559,7 @@ define @vfnmsub_vf_nxv4f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2574,7 +2574,7 @@ define @vfnmsub_vf_nxv4f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv4f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2615,7 +2615,7 @@ define @vfnmsub_vf_nxv4f16_neg_splat_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv4f16_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2630,7 +2630,7 @@ define @vfnmsub_vf_nxv4f16_neg_splat_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv4f16_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2659,7 +2659,7 @@ define 
@vfmsub_vv_nxv8f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -2698,7 +2698,7 @@ define @vfmsub_vf_nxv8f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2713,7 +2713,7 @@ define @vfmsub_vf_nxv8f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv8f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2753,7 +2753,7 @@ define @vfnmadd_vv_nxv8f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -2767,7 +2767,7 @@ define @vfnmadd_vv_nxv8f16_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv8f16_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -2809,7 +2809,7 @@ define @vfnmadd_vf_nxv8f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2825,7 +2825,7 @@ define @vfnmadd_vf_nxv8f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv8f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2869,7 +2869,7 @@ define @vfnmadd_vf_nxv8f16_neg_splat_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv8f16_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2885,7 +2885,7 @@ define @vfnmadd_vf_nxv8f16_neg_splat_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv8f16_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2926,7 +2926,7 @@ define @vfnmsub_vv_nxv8f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, 
ma ; CHECK-NEXT: vfnmadd.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -2940,7 +2940,7 @@ define @vfnmsub_vv_nxv8f16_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv8f16_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -2980,7 +2980,7 @@ define @vfnmsub_vf_nxv8f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2995,7 +2995,7 @@ define @vfnmsub_vf_nxv8f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv8f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3036,7 +3036,7 @@ define @vfnmsub_vf_nxv8f16_neg_splat_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv8f16_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3051,7 +3051,7 @@ define @vfnmsub_vf_nxv8f16_neg_splat_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv8f16_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3080,7 +3080,7 @@ define @vfmsub_vv_nxv16f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -3119,7 +3119,7 @@ define @vfmsub_vf_nxv16f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3134,7 +3134,7 @@ define @vfmsub_vf_nxv16f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv16f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3174,7 +3174,7 @@ define @vfnmadd_vv_nxv16f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -3188,7 +3188,7 @@ define @vfnmadd_vv_nxv16f16_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; 
CHECK-LABEL: vfnmadd_vv_nxv16f16_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -3230,7 +3230,7 @@ define @vfnmadd_vf_nxv16f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3246,7 +3246,7 @@ define @vfnmadd_vf_nxv16f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv16f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3290,7 +3290,7 @@ define @vfnmadd_vf_nxv16f16_neg_splat_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv16f16_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3306,7 +3306,7 @@ define @vfnmadd_vf_nxv16f16_neg_splat_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv16f16_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3347,7 +3347,7 @@ define @vfnmsub_vv_nxv16f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -3361,7 +3361,7 @@ define @vfnmsub_vv_nxv16f16_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv16f16_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -3401,7 +3401,7 @@ define @vfnmsub_vf_nxv16f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3416,7 +3416,7 @@ define @vfnmsub_vf_nxv16f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv16f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3457,7 +3457,7 @@ define @vfnmsub_vf_nxv16f16_neg_splat_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv16f16_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma 
; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3472,7 +3472,7 @@ define @vfnmsub_vf_nxv16f16_neg_splat_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv16f16_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3503,7 +3503,7 @@ ; CHECK-LABEL: vfmsub_vv_nxv32f16_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -3542,7 +3542,7 @@ define @vfmsub_vf_nxv32f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv32f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3557,7 +3557,7 @@ define @vfmsub_vf_nxv32f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv32f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3600,7 +3600,7 @@ ; CHECK-LABEL: vfnmadd_vv_nxv32f16_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -3615,7 +3615,7 @@ ; CHECK-LABEL: vfnmadd_vv_nxv32f16_unmasked_commuted: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -3657,7 +3657,7 @@ define @vfnmadd_vf_nxv32f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv32f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3673,7 +3673,7 @@ define @vfnmadd_vf_nxv32f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv32f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3717,7 +3717,7 @@ define @vfnmadd_vf_nxv32f16_neg_splat_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv32f16_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3733,7 +3733,7 @@ define @vfnmadd_vf_nxv32f16_neg_splat_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv32f16_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: 
vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3777,7 +3777,7 @@ ; CHECK-LABEL: vfnmsub_vv_nxv32f16_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -3792,7 +3792,7 @@ ; CHECK-LABEL: vfnmsub_vv_nxv32f16_unmasked_commuted: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -3832,7 +3832,7 @@ define @vfnmsub_vf_nxv32f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv32f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3847,7 +3847,7 @@ define @vfnmsub_vf_nxv32f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv32f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3888,7 +3888,7 @@ define @vfnmsub_vf_nxv32f16_neg_splat_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv32f16_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3903,7 +3903,7 @@ define @vfnmsub_vf_nxv32f16_neg_splat_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv32f16_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3932,7 +3932,7 @@ define @vfmsub_vv_nxv1f32_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -3971,7 +3971,7 @@ define @vfmsub_vf_nxv1f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -3986,7 +3986,7 @@ define @vfmsub_vf_nxv1f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv1f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4026,7 +4026,7 @@ define @vfnmadd_vv_nxv1f32_unmasked( %va, %b, %c, i32 zeroext %evl) { ; 
CHECK-LABEL: vfnmadd_vv_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -4040,7 +4040,7 @@ define @vfnmadd_vv_nxv1f32_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv1f32_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -4082,7 +4082,7 @@ define @vfnmadd_vf_nxv1f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4098,7 +4098,7 @@ define @vfnmadd_vf_nxv1f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv1f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4142,7 +4142,7 @@ define @vfnmadd_vf_nxv1f32_neg_splat_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv1f32_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4158,7 +4158,7 @@ define @vfnmadd_vf_nxv1f32_neg_splat_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv1f32_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4199,7 +4199,7 @@ define @vfnmsub_vv_nxv1f32_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -4213,7 +4213,7 @@ define @vfnmsub_vv_nxv1f32_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv1f32_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -4253,7 +4253,7 @@ define @vfnmsub_vf_nxv1f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4268,7 +4268,7 @@ define @vfnmsub_vf_nxv1f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv1f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: 
vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4309,7 +4309,7 @@ define @vfnmsub_vf_nxv1f32_neg_splat_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv1f32_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4324,7 +4324,7 @@ define @vfnmsub_vf_nxv1f32_neg_splat_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv1f32_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4353,7 +4353,7 @@ define @vfmsub_vv_nxv2f32_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -4392,7 +4392,7 @@ define @vfmsub_vf_nxv2f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4407,7 +4407,7 @@ define @vfmsub_vf_nxv2f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv2f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4447,7 +4447,7 @@ define @vfnmadd_vv_nxv2f32_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -4461,7 +4461,7 @@ define @vfnmadd_vv_nxv2f32_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv2f32_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -4503,7 +4503,7 @@ define @vfnmadd_vf_nxv2f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4519,7 +4519,7 @@ define @vfnmadd_vf_nxv2f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv2f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4563,7 +4563,7 @@ define @vfnmadd_vf_nxv2f32_neg_splat_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; 
CHECK-LABEL: vfnmadd_vf_nxv2f32_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4579,7 +4579,7 @@ define @vfnmadd_vf_nxv2f32_neg_splat_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv2f32_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4620,7 +4620,7 @@ define @vfnmsub_vv_nxv2f32_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -4634,7 +4634,7 @@ define @vfnmsub_vv_nxv2f32_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv2f32_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -4674,7 +4674,7 @@ define @vfnmsub_vf_nxv2f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4689,7 +4689,7 @@ define @vfnmsub_vf_nxv2f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv2f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4730,7 +4730,7 @@ define @vfnmsub_vf_nxv2f32_neg_splat_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv2f32_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4745,7 +4745,7 @@ define @vfnmsub_vf_nxv2f32_neg_splat_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv2f32_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4774,7 +4774,7 @@ define @vfmsub_vv_nxv4f32_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -4813,7 +4813,7 @@ define @vfmsub_vf_nxv4f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfmsub.vf 
v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4828,7 +4828,7 @@ define @vfmsub_vf_nxv4f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv4f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4868,7 +4868,7 @@ define @vfnmadd_vv_nxv4f32_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -4882,7 +4882,7 @@ define @vfnmadd_vv_nxv4f32_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv4f32_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -4924,7 +4924,7 @@ define @vfnmadd_vf_nxv4f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4940,7 +4940,7 @@ define @vfnmadd_vf_nxv4f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv4f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4984,7 +4984,7 @@ define @vfnmadd_vf_nxv4f32_neg_splat_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv4f32_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5000,7 +5000,7 @@ define @vfnmadd_vf_nxv4f32_neg_splat_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv4f32_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5041,7 +5041,7 @@ define @vfnmsub_vv_nxv4f32_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -5055,7 +5055,7 @@ define @vfnmsub_vv_nxv4f32_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv4f32_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -5095,7 +5095,7 @@ define @vfnmsub_vf_nxv4f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: 
vfnmsub_vf_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5110,7 +5110,7 @@ define @vfnmsub_vf_nxv4f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv4f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5151,7 +5151,7 @@ define @vfnmsub_vf_nxv4f32_neg_splat_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv4f32_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5166,7 +5166,7 @@ define @vfnmsub_vf_nxv4f32_neg_splat_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv4f32_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5195,7 +5195,7 @@ define @vfmsub_vv_nxv8f32_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -5234,7 +5234,7 @@ define @vfmsub_vf_nxv8f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5249,7 +5249,7 @@ define @vfmsub_vf_nxv8f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv8f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5289,7 +5289,7 @@ define @vfnmadd_vv_nxv8f32_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -5303,7 +5303,7 @@ define @vfnmadd_vv_nxv8f32_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv8f32_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -5345,7 +5345,7 @@ define @vfnmadd_vf_nxv8f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret 
%elt.head = insertelement poison, float %b, i32 0 @@ -5361,7 +5361,7 @@ define @vfnmadd_vf_nxv8f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv8f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5405,7 +5405,7 @@ define @vfnmadd_vf_nxv8f32_neg_splat_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv8f32_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5421,7 +5421,7 @@ define @vfnmadd_vf_nxv8f32_neg_splat_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv8f32_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5462,7 +5462,7 @@ define @vfnmsub_vv_nxv8f32_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -5476,7 +5476,7 @@ define @vfnmsub_vv_nxv8f32_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv8f32_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -5516,7 +5516,7 @@ define @vfnmsub_vf_nxv8f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5531,7 +5531,7 @@ define @vfnmsub_vf_nxv8f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv8f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5572,7 +5572,7 @@ define @vfnmsub_vf_nxv8f32_neg_splat_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv8f32_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5587,7 +5587,7 @@ define @vfnmsub_vf_nxv8f32_neg_splat_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv8f32_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5618,7 +5618,7 @@ ; CHECK-LABEL: vfmsub_vv_nxv16f32_unmasked: ; CHECK: # 
%bb.0: ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -5657,7 +5657,7 @@ define @vfmsub_vf_nxv16f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5672,7 +5672,7 @@ define @vfmsub_vf_nxv16f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv16f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5715,7 +5715,7 @@ ; CHECK-LABEL: vfnmadd_vv_nxv16f32_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -5730,7 +5730,7 @@ ; CHECK-LABEL: vfnmadd_vv_nxv16f32_unmasked_commuted: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -5772,7 +5772,7 @@ define @vfnmadd_vf_nxv16f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5788,7 +5788,7 @@ define @vfnmadd_vf_nxv16f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv16f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5832,7 +5832,7 @@ define @vfnmadd_vf_nxv16f32_neg_splat_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv16f32_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5848,7 +5848,7 @@ define @vfnmadd_vf_nxv16f32_neg_splat_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv16f32_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5892,7 +5892,7 @@ ; CHECK-LABEL: vfnmsub_vv_nxv16f32_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -5907,7 +5907,7 @@ ; CHECK-LABEL: 
vfnmsub_vv_nxv16f32_unmasked_commuted: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -5947,7 +5947,7 @@ define @vfnmsub_vf_nxv16f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5962,7 +5962,7 @@ define @vfnmsub_vf_nxv16f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv16f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -6003,7 +6003,7 @@ define @vfnmsub_vf_nxv16f32_neg_splat_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv16f32_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -6018,7 +6018,7 @@ define @vfnmsub_vf_nxv16f32_neg_splat_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv16f32_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -6047,7 +6047,7 @@ define @vfmsub_vv_nxv1f64_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -6086,7 +6086,7 @@ define @vfmsub_vf_nxv1f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6101,7 +6101,7 @@ define @vfmsub_vf_nxv1f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv1f64_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6141,7 +6141,7 @@ define @vfnmadd_vv_nxv1f64_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -6155,7 +6155,7 @@ define @vfnmadd_vv_nxv1f64_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv1f64_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; 
CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -6197,7 +6197,7 @@ define @vfnmadd_vf_nxv1f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6213,7 +6213,7 @@ define @vfnmadd_vf_nxv1f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv1f64_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6257,7 +6257,7 @@ define @vfnmadd_vf_nxv1f64_neg_splat_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv1f64_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6273,7 +6273,7 @@ define @vfnmadd_vf_nxv1f64_neg_splat_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv1f64_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6314,7 +6314,7 @@ define @vfnmsub_vv_nxv1f64_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -6328,7 +6328,7 @@ define @vfnmsub_vv_nxv1f64_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv1f64_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -6368,7 +6368,7 @@ define @vfnmsub_vf_nxv1f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6383,7 +6383,7 @@ define @vfnmsub_vf_nxv1f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv1f64_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6424,7 +6424,7 @@ define @vfnmsub_vf_nxv1f64_neg_splat_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv1f64_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6439,7 +6439,7 @@ define 
@vfnmsub_vf_nxv1f64_neg_splat_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv1f64_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6468,7 +6468,7 @@ define @vfmsub_vv_nxv2f64_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -6507,7 +6507,7 @@ define @vfmsub_vf_nxv2f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6522,7 +6522,7 @@ define @vfmsub_vf_nxv2f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv2f64_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6562,7 +6562,7 @@ define @vfnmadd_vv_nxv2f64_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -6576,7 +6576,7 @@ define @vfnmadd_vv_nxv2f64_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv2f64_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -6618,7 +6618,7 @@ define @vfnmadd_vf_nxv2f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6634,7 +6634,7 @@ define @vfnmadd_vf_nxv2f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv2f64_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6678,7 +6678,7 @@ define @vfnmadd_vf_nxv2f64_neg_splat_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv2f64_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6694,7 +6694,7 @@ define @vfnmadd_vf_nxv2f64_neg_splat_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv2f64_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, 
a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6735,7 +6735,7 @@ define @vfnmsub_vv_nxv2f64_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -6749,7 +6749,7 @@ define @vfnmsub_vv_nxv2f64_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv2f64_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -6789,7 +6789,7 @@ define @vfnmsub_vf_nxv2f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6804,7 +6804,7 @@ define @vfnmsub_vf_nxv2f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv2f64_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6845,7 +6845,7 @@ define @vfnmsub_vf_nxv2f64_neg_splat_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv2f64_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6860,7 +6860,7 @@ define @vfnmsub_vf_nxv2f64_neg_splat_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv2f64_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6889,7 +6889,7 @@ define @vfmsub_vv_nxv4f64_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -6928,7 +6928,7 @@ define @vfmsub_vf_nxv4f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6943,7 +6943,7 @@ define @vfmsub_vf_nxv4f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv4f64_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6983,7 +6983,7 
@@ define @vfnmadd_vv_nxv4f64_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -6997,7 +6997,7 @@ define @vfnmadd_vv_nxv4f64_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv4f64_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -7039,7 +7039,7 @@ define @vfnmadd_vf_nxv4f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -7055,7 +7055,7 @@ define @vfnmadd_vf_nxv4f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv4f64_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -7099,7 +7099,7 @@ define @vfnmadd_vf_nxv4f64_neg_splat_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv4f64_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -7115,7 +7115,7 @@ define @vfnmadd_vf_nxv4f64_neg_splat_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv4f64_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -7156,7 +7156,7 @@ define @vfnmsub_vv_nxv4f64_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -7170,7 +7170,7 @@ define @vfnmsub_vv_nxv4f64_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv4f64_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -7210,7 +7210,7 @@ define @vfnmsub_vf_nxv4f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -7225,7 +7225,7 @@ define @vfnmsub_vf_nxv4f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv4f64_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, 
m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -7266,7 +7266,7 @@ define @vfnmsub_vf_nxv4f64_neg_splat_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv4f64_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -7281,7 +7281,7 @@ define @vfnmsub_vf_nxv4f64_neg_splat_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv4f64_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -7312,7 +7312,7 @@ ; CHECK-LABEL: vfmsub_vv_nxv8f64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -7351,7 +7351,7 @@ define @vfmsub_vf_nxv8f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -7366,7 +7366,7 @@ define @vfmsub_vf_nxv8f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv8f64_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -7409,7 +7409,7 @@ ; CHECK-LABEL: vfnmadd_vv_nxv8f64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -7424,7 +7424,7 @@ ; CHECK-LABEL: vfnmadd_vv_nxv8f64_unmasked_commuted: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -7466,7 +7466,7 @@ define @vfnmadd_vf_nxv8f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -7482,7 +7482,7 @@ define @vfnmadd_vf_nxv8f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv8f64_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -7526,7 +7526,7 @@ define @vfnmadd_vf_nxv8f64_neg_splat_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: 
vfnmadd_vf_nxv8f64_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -7542,7 +7542,7 @@ define @vfnmadd_vf_nxv8f64_neg_splat_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv8f64_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -7586,7 +7586,7 @@ ; CHECK-LABEL: vfnmsub_vv_nxv8f64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -7601,7 +7601,7 @@ ; CHECK-LABEL: vfnmsub_vv_nxv8f64_unmasked_commuted: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -7641,7 +7641,7 @@ define @vfnmsub_vf_nxv8f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -7656,7 +7656,7 @@ define @vfnmsub_vf_nxv8f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv8f64_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -7697,7 +7697,7 @@ define @vfnmsub_vf_nxv8f64_neg_splat_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv8f64_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -7712,7 +7712,7 @@ define @vfnmsub_vf_nxv8f64_neg_splat_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv8f64_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmacc-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmacc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmacc-vp.ll @@ -26,7 +26,7 @@ define @vfmacc_vv_nxv1f16_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfmacc.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -72,7 +72,7 @@ define @vfmacc_vf_nxv1f16_unmasked( %va, half %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vf_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli 
zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfmacc.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -153,7 +153,7 @@ define @vfmacc_vv_nxv2f16_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfmacc.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -199,7 +199,7 @@ define @vfmacc_vf_nxv2f16_unmasked( %va, half %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vf_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfmacc.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -280,7 +280,7 @@ define @vfmacc_vv_nxv4f16_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfmacc.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -326,7 +326,7 @@ define @vfmacc_vf_nxv4f16_unmasked( %va, half %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vf_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfmacc.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -407,7 +407,7 @@ define @vfmacc_vv_nxv8f16_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfmacc.vv v12, v8, v10 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -453,7 +453,7 @@ define @vfmacc_vf_nxv8f16_unmasked( %va, half %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vf_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfmacc.vf v10, fa0, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -534,7 +534,7 @@ define @vfmacc_vv_nxv16f16_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfmacc.vv v16, v8, v12 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -580,7 +580,7 @@ define @vfmacc_vf_nxv16f16_unmasked( %va, half %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vf_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfmacc.vf v12, fa0, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -663,7 +663,7 @@ ; CHECK-LABEL: vfmacc_vv_nxv32f16_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, ma ; CHECK-NEXT: vfmacc.vv v24, v8, v16 ; CHECK-NEXT: vmv8r.v v8, v24 ; CHECK-NEXT: ret @@ -709,7 +709,7 @@ define @vfmacc_vf_nxv32f16_unmasked( %va, half %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vf_nxv32f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vfmacc.vf v16, fa0, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -791,7 +791,7 @@ define @vfmacc_vv_nxv1f32_unmasked( 
%a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfmacc.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -837,7 +837,7 @@ define @vfmacc_vf_nxv1f32_unmasked( %va, float %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vf_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfmacc.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -918,7 +918,7 @@ define @vfmacc_vv_nxv2f32_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfmacc.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -964,7 +964,7 @@ define @vfmacc_vf_nxv2f32_unmasked( %va, float %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vf_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfmacc.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1045,7 +1045,7 @@ define @vfmacc_vv_nxv4f32_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfmacc.vv v12, v8, v10 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -1091,7 +1091,7 @@ define @vfmacc_vf_nxv4f32_unmasked( %va, float %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vf_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfmacc.vf v10, fa0, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1172,7 +1172,7 @@ define @vfmacc_vv_nxv8f32_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfmacc.vv v16, v8, v12 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -1218,7 +1218,7 @@ define @vfmacc_vf_nxv8f32_unmasked( %va, float %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vf_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfmacc.vf v12, fa0, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1301,7 +1301,7 @@ ; CHECK-LABEL: vfmacc_vv_nxv16f32_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, ma ; CHECK-NEXT: vfmacc.vv v24, v8, v16 ; CHECK-NEXT: vmv8r.v v8, v24 ; CHECK-NEXT: ret @@ -1347,7 +1347,7 @@ define @vfmacc_vf_nxv16f32_unmasked( %va, float %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vf_nxv16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vfmacc.vf v16, fa0, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1429,7 +1429,7 @@ define @vfmacc_vv_nxv1f64_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; 
CHECK-NEXT: vfmacc.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1475,7 +1475,7 @@ define @vfmacc_vf_nxv1f64_unmasked( %va, double %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vf_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfmacc.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1556,7 +1556,7 @@ define @vfmacc_vv_nxv2f64_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfmacc.vv v12, v8, v10 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -1602,7 +1602,7 @@ define @vfmacc_vf_nxv2f64_unmasked( %va, double %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vf_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfmacc.vf v10, fa0, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1683,7 +1683,7 @@ define @vfmacc_vv_nxv4f64_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vv_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfmacc.vv v16, v8, v12 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -1729,7 +1729,7 @@ define @vfmacc_vf_nxv4f64_unmasked( %va, double %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vf_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfmacc.vf v12, fa0, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1812,7 +1812,7 @@ ; CHECK-LABEL: vfmacc_vv_nxv8f64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, ma ; CHECK-NEXT: vfmacc.vv v24, v8, v16 ; CHECK-NEXT: vmv8r.v v8, v24 ; CHECK-NEXT: ret @@ -1858,7 +1858,7 @@ define @vfmacc_vf_nxv8f64_unmasked( %va, double %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmacc_vf_nxv8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vfmacc.vf v16, fa0, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmacc.ll b/llvm/test/CodeGen/RISCV/rvv/vfmacc.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmacc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmacc.ll @@ -13,7 +13,7 @@ define @intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -60,7 +60,7 @@ define @intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -107,7 +107,7 @@ define @intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; 
CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -154,7 +154,7 @@ define @intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfmacc.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -201,7 +201,7 @@ define @intrinsic_vfmacc_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfmacc.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -248,7 +248,7 @@ define @intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -295,7 +295,7 @@ define @intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -342,7 +342,7 @@ define @intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfmacc.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -389,7 +389,7 @@ define @intrinsic_vfmacc_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfmacc.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -436,7 +436,7 @@ define @intrinsic_vfmacc_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -483,7 +483,7 @@ define @intrinsic_vfmacc_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfmacc.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -530,7 +530,7 @@ define @intrinsic_vfmacc_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfmacc.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -577,7 +577,7 @@ define @intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli 
zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -624,7 +624,7 @@ define @intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -671,7 +671,7 @@ define @intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -718,7 +718,7 @@ define @intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: @@ -765,7 +765,7 @@ define @intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: @@ -812,7 +812,7 @@ define @intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vfmacc_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -1047,7 +1047,7 @@ define @intrinsic_vfmacc_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: 
vfmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: @@ -1094,7 +1094,7 @@ define @intrinsic_vfmacc_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll @@ -12,7 +12,7 @@ define @vfmadd_vv_nxv1f16( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %vd = call @llvm.fma.v1f16( %va, %vb, %vc) @@ -22,7 +22,7 @@ define @vfmadd_vf_nxv1f16( %va, %vb, half %c) { ; CHECK-LABEL: vfmadd_vf_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -36,7 +36,7 @@ define @vfmadd_vv_nxv2f16( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v10, v9 ; CHECK-NEXT: ret %vd = call @llvm.fma.v2f16( %va, %vc, %vb) @@ -46,7 +46,7 @@ define @vfmadd_vf_nxv2f16( %va, %vb, half %c) { ; CHECK-LABEL: vfmadd_vf_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -60,7 +60,7 @@ define @vfmadd_vv_nxv4f16( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %vd = call @llvm.fma.v4f16( %vb, %va, %vc) @@ -70,7 +70,7 @@ define @vfmadd_vf_nxv4f16( %va, %vb, half %c) { ; CHECK-LABEL: vfmadd_vf_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -84,7 +84,7 @@ define @vfmadd_vv_nxv8f16( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfmacc.vv v8, v12, v10 ; CHECK-NEXT: ret %vd = call @llvm.fma.v8f16( %vb, %vc, %va) @@ -94,7 +94,7 @@ define @vfmadd_vf_nxv8f16( %va, %vb, half %c) { ; CHECK-LABEL: vfmadd_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -108,7 +108,7 @@ define @vfmadd_vv_nxv16f16( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v16, v12 ; CHECK-NEXT: ret %vd = call @llvm.fma.v16f16( %vc, %va, %vb) @@ -118,7 +118,7 @@ define @vfmadd_vf_nxv16f16( %va, %vb, half %c) { ; CHECK-LABEL: vfmadd_vf_nxv16f16: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -133,7 +133,7 @@ ; CHECK-LABEL: vfmadd_vv_nxv32f16: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfmacc.vv v8, v16, v24 ; CHECK-NEXT: ret %vd = call @llvm.fma.v32f16( %vc, %vb, %va) @@ -143,7 +143,7 @@ define @vfmadd_vf_nxv32f16( %va, %vb, half %c) { ; CHECK-LABEL: vfmadd_vf_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfmacc.vf v8, fa0, v16 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -157,7 +157,7 @@ define @vfmadd_vv_nxv1f32( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %vd = call @llvm.fma.v1f32( %va, %vb, %vc) @@ -167,7 +167,7 @@ define @vfmadd_vf_nxv1f32( %va, %vb, float %c) { ; CHECK-LABEL: vfmadd_vf_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -181,7 +181,7 @@ define @vfmadd_vv_nxv2f32( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v10, v9 ; CHECK-NEXT: ret %vd = call @llvm.fma.v2f32( %va, %vc, %vb) @@ -191,7 +191,7 @@ define @vfmadd_vf_nxv2f32( %va, %vb, float %c) { ; CHECK-LABEL: vfmadd_vf_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -205,7 +205,7 @@ define @vfmadd_vv_nxv4f32( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v10, v12 ; CHECK-NEXT: ret %vd = call @llvm.fma.v4f32( %vb, %va, %vc) @@ -215,7 +215,7 @@ define @vfmadd_vf_nxv4f32( %va, %vb, float %c) { ; CHECK-LABEL: vfmadd_vf_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -229,7 +229,7 @@ define @vfmadd_vv_nxv8f32( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfmacc.vv v8, v16, v12 ; CHECK-NEXT: ret %vd = call @llvm.fma.v8f32( %vb, %vc, %va) @@ -239,7 +239,7 @@ define @vfmadd_vf_nxv8f32( %va, %vb, float %c) { ; CHECK-LABEL: vfmadd_vf_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -254,7 +254,7 @@ ; CHECK-LABEL: vfmadd_vv_nxv16f32: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; 
CHECK-NEXT: vfmadd.vv v8, v24, v16 ; CHECK-NEXT: ret %vd = call @llvm.fma.v16f32( %vc, %va, %vb) @@ -264,7 +264,7 @@ define @vfmadd_vf_nxv16f32( %va, %vb, float %c) { ; CHECK-LABEL: vfmadd_vf_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -278,7 +278,7 @@ define @vfmadd_vv_nxv1f64( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %vd = call @llvm.fma.v1f64( %va, %vb, %vc) @@ -288,7 +288,7 @@ define @vfmadd_vf_nxv1f64( %va, %vb, double %c) { ; CHECK-LABEL: vfmadd_vf_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, double %c, i32 0 @@ -302,7 +302,7 @@ define @vfmadd_vv_nxv2f64( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v12, v10 ; CHECK-NEXT: ret %vd = call @llvm.fma.v2f64( %va, %vc, %vb) @@ -312,7 +312,7 @@ define @vfmadd_vf_nxv2f64( %va, %vb, double %c) { ; CHECK-LABEL: vfmadd_vf_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret %head = insertelement poison, double %c, i32 0 @@ -326,7 +326,7 @@ define @vfmadd_vv_nxv4f64( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v12, v16 ; CHECK-NEXT: ret %vd = call @llvm.fma.v4f64( %vb, %va, %vc) @@ -336,7 +336,7 @@ define @vfmadd_vf_nxv4f64( %va, %vb, double %c) { ; CHECK-LABEL: vfmadd_vf_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %head = insertelement poison, double %c, i32 0 @@ -351,7 +351,7 @@ ; CHECK-LABEL: vfmadd_vv_nxv8f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfmacc.vv v8, v16, v24 ; CHECK-NEXT: ret %vd = call @llvm.fma.v8f64( %vb, %vc, %va) @@ -361,7 +361,7 @@ define @vfmadd_vf_nxv8f64( %va, %vb, double %c) { ; CHECK-LABEL: vfmadd_vf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfmacc.vf v8, fa0, v16 ; CHECK-NEXT: ret %head = insertelement poison, double %c, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd.ll @@ -13,7 +13,7 @@ define @intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfmadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -60,7 +60,7 @@ define @intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, 
%2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfmadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -107,7 +107,7 @@ define @intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfmadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -154,7 +154,7 @@ define @intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfmadd.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -201,7 +201,7 @@ define @intrinsic_vfmadd_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfmadd.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -248,7 +248,7 @@ define @intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfmadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -295,7 +295,7 @@ define @intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfmadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -342,7 +342,7 @@ define @intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfmadd.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -389,7 +389,7 @@ define @intrinsic_vfmadd_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfmadd.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -436,7 +436,7 @@ define @intrinsic_vfmadd_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfmadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -483,7 +483,7 @@ define @intrinsic_vfmadd_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfmadd.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -530,7 +530,7 @@ define @intrinsic_vfmadd_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) 
nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfmadd.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -577,7 +577,7 @@ define @intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -624,7 +624,7 @@ define @intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -671,7 +671,7 @@ define @intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -718,7 +718,7 @@ define @intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: @@ -765,7 +765,7 @@ define @intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: @@ -812,7 +812,7 @@ define @intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -1047,7 +1047,7 @@ define @intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: @@ -1094,7 +1094,7 @@ define @intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmax-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-sdnode.ll @@ -9,7 +9,7 @@ define @vfmax_nxv1f16_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv1f16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.maxnum.nxv1f16( %a, %b) @@ -19,7 +19,7 @@ define @vfmax_nxv1f16_vf( %a, half %b) { ; CHECK-LABEL: vfmax_nxv1f16_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -33,7 +33,7 @@ define @vfmax_nxv2f16_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv2f16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.maxnum.nxv2f16( %a, %b) @@ -43,7 +43,7 @@ define @vfmax_nxv2f16_vf( %a, half %b) { ; CHECK-LABEL: vfmax_nxv2f16_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -57,7 +57,7 @@ define @vfmax_nxv4f16_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv4f16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.maxnum.nxv4f16( %a, %b) @@ -67,7 +67,7 @@ define @vfmax_nxv4f16_vf( %a, half %b) { ; CHECK-LABEL: vfmax_nxv4f16_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -81,7 +81,7 @@ define @vfmax_nxv8f16_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv8f16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call @llvm.maxnum.nxv8f16( %a, %b) @@ -91,7 +91,7 @@ define @vfmax_nxv8f16_vf( %a, half %b) { ; CHECK-LABEL: vfmax_nxv8f16_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ 
-105,7 +105,7 @@ define @vfmax_nxv16f16_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv16f16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v12 ; CHECK-NEXT: ret %v = call @llvm.maxnum.nxv16f16( %a, %b) @@ -115,7 +115,7 @@ define @vfmax_nxv16f16_vf( %a, half %b) { ; CHECK-LABEL: vfmax_nxv16f16_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -129,7 +129,7 @@ define @vfmax_nxv32f16_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv32f16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v16 ; CHECK-NEXT: ret %v = call @llvm.maxnum.nxv32f16( %a, %b) @@ -139,7 +139,7 @@ define @vfmax_nxv32f16_vf( %a, half %b) { ; CHECK-LABEL: vfmax_nxv32f16_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -153,7 +153,7 @@ define @vfmax_nxv1f32_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv1f32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.maxnum.nxv1f32( %a, %b) @@ -163,7 +163,7 @@ define @vfmax_nxv1f32_vf( %a, float %b) { ; CHECK-LABEL: vfmax_nxv1f32_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -177,7 +177,7 @@ define @vfmax_nxv2f32_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv2f32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.maxnum.nxv2f32( %a, %b) @@ -187,7 +187,7 @@ define @vfmax_nxv2f32_vf( %a, float %b) { ; CHECK-LABEL: vfmax_nxv2f32_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -201,7 +201,7 @@ define @vfmax_nxv4f32_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv4f32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call @llvm.maxnum.nxv4f32( %a, %b) @@ -211,7 +211,7 @@ define @vfmax_nxv4f32_vf( %a, float %b) { ; CHECK-LABEL: vfmax_nxv4f32_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -225,7 +225,7 @@ define @vfmax_nxv8f32_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv8f32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v12 ; CHECK-NEXT: ret %v = call @llvm.maxnum.nxv8f32( %a, %b) @@ -235,7 +235,7 @@ define @vfmax_nxv8f32_vf( %a, float %b) { ; CHECK-LABEL: vfmax_nxv8f32_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma 
; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -249,7 +249,7 @@ define @vfmax_nxv16f32_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv16f32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v16 ; CHECK-NEXT: ret %v = call @llvm.maxnum.nxv16f32( %a, %b) @@ -259,7 +259,7 @@ define @vfmax_nxv16f32_vf( %a, float %b) { ; CHECK-LABEL: vfmax_nxv16f32_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -273,7 +273,7 @@ define @vfmax_nxv1f64_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv1f64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.maxnum.nxv1f64( %a, %b) @@ -283,7 +283,7 @@ define @vfmax_nxv1f64_vf( %a, double %b) { ; CHECK-LABEL: vfmax_nxv1f64_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -297,7 +297,7 @@ define @vfmax_nxv2f64_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv2f64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call @llvm.maxnum.nxv2f64( %a, %b) @@ -307,7 +307,7 @@ define @vfmax_nxv2f64_vf( %a, double %b) { ; CHECK-LABEL: vfmax_nxv2f64_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -321,7 +321,7 @@ define @vfmax_nxv4f64_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv4f64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v12 ; CHECK-NEXT: ret %v = call @llvm.maxnum.nxv4f64( %a, %b) @@ -331,7 +331,7 @@ define @vfmax_nxv4f64_vf( %a, double %b) { ; CHECK-LABEL: vfmax_nxv4f64_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -345,7 +345,7 @@ define @vfmax_nxv8f64_vv( %a, %b) { ; CHECK-LABEL: vfmax_nxv8f64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v16 ; CHECK-NEXT: ret %v = call @llvm.maxnum.nxv8f64( %a, %b) @@ -355,7 +355,7 @@ define @vfmax_nxv8f64_vf( %a, double %b) { ; CHECK-LABEL: vfmax_nxv8f64_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll @@ -19,7 +19,7 @@ define @vfmax_vv_nxv1f16_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli 
zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -43,7 +43,7 @@ define @vfmax_vv_nxv2f16_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -67,7 +67,7 @@ define @vfmax_vv_nxv4f16_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -91,7 +91,7 @@ define @vfmax_vv_nxv8f16_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -115,7 +115,7 @@ define @vfmax_vv_nxv16f16_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -139,7 +139,7 @@ define @vfmax_vv_nxv32f16_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv32f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -163,7 +163,7 @@ define @vfmax_vv_nxv1f32_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -187,7 +187,7 @@ define @vfmax_vv_nxv2f32_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -211,7 +211,7 @@ define @vfmax_vv_nxv4f32_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -235,7 +235,7 @@ define @vfmax_vv_nxv8f32_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -259,7 +259,7 @@ define @vfmax_vv_nxv16f32_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -283,7 +283,7 @@ 
define @vfmax_vv_nxv1f64_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -307,7 +307,7 @@ define @vfmax_vv_nxv2f64_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -331,7 +331,7 @@ define @vfmax_vv_nxv4f64_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -355,7 +355,7 @@ define @vfmax_vv_nxv8f64_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmax_vv_nxv8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmax.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmax.ll @@ -12,7 +12,7 @@ define @intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -59,7 +59,7 @@ define @intrinsic_vfmax_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -106,7 +106,7 @@ define @intrinsic_vfmax_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -153,7 +153,7 @@ define @intrinsic_vfmax_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -200,7 +200,7 @@ define @intrinsic_vfmax_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -247,7 +247,7 @@ define @intrinsic_vfmax_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: 
vfmax.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -295,7 +295,7 @@ define @intrinsic_vfmax_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -342,7 +342,7 @@ define @intrinsic_vfmax_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -389,7 +389,7 @@ define @intrinsic_vfmax_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -436,7 +436,7 @@ define @intrinsic_vfmax_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -483,7 +483,7 @@ define @intrinsic_vfmax_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -531,7 +531,7 @@ define @intrinsic_vfmax_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -578,7 +578,7 @@ define @intrinsic_vfmax_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -625,7 +625,7 @@ define @intrinsic_vfmax_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -672,7 +672,7 @@ define @intrinsic_vfmax_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -720,7 +720,7 @@ define @intrinsic_vfmax_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -767,7 +767,7 @@ define 
@intrinsic_vfmax_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -814,7 +814,7 @@ define @intrinsic_vfmax_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -861,7 +861,7 @@ define @intrinsic_vfmax_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -908,7 +908,7 @@ define @intrinsic_vfmax_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -955,7 +955,7 @@ define @intrinsic_vfmax_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1002,7 +1002,7 @@ define @intrinsic_vfmax_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1049,7 +1049,7 @@ define @intrinsic_vfmax_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1096,7 +1096,7 @@ define @intrinsic_vfmax_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1143,7 +1143,7 @@ define @intrinsic_vfmax_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1190,7 +1190,7 @@ define @intrinsic_vfmax_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1237,7 +1237,7 @@ define @intrinsic_vfmax_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfmax_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1284,7 +1284,7 @@ define @intrinsic_vfmax_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1331,7 +1331,7 @@ define @intrinsic_vfmax_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1378,7 +1378,7 @@ define @intrinsic_vfmax_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmax_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfmax.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmerge.ll b/llvm/test/CodeGen/RISCV/rvv/vfmerge.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmerge.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmerge.ll @@ -13,7 +13,7 @@ define @intrinsic_vfmerge_vvm_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -37,7 +37,7 @@ define @intrinsic_vfmerge_vfm_nxv1f16_nxv1f16_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: @@ -61,7 +61,7 @@ define @intrinsic_vfmerge_vvm_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -85,7 +85,7 @@ define @intrinsic_vfmerge_vfm_nxv2f16_nxv2f16_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: @@ -109,7 +109,7 @@ define @intrinsic_vfmerge_vvm_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -133,7 +133,7 @@ define @intrinsic_vfmerge_vfm_nxv4f16_nxv4f16_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: 
vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: @@ -157,7 +157,7 @@ define @intrinsic_vfmerge_vvm_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0 ; CHECK-NEXT: ret entry: @@ -181,7 +181,7 @@ define @intrinsic_vfmerge_vfm_nxv8f16_nxv8f16_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: @@ -205,7 +205,7 @@ define @intrinsic_vfmerge_vvm_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0 ; CHECK-NEXT: ret entry: @@ -229,7 +229,7 @@ define @intrinsic_vfmerge_vfm_nxv16f16_nxv16f16_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: @@ -253,7 +253,7 @@ define @intrinsic_vfmerge_vvm_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0 ; CHECK-NEXT: ret entry: @@ -277,7 +277,7 @@ define @intrinsic_vfmerge_vfm_nxv32f16_nxv32f16_f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: @@ -301,7 +301,7 @@ define @intrinsic_vfmerge_vvm_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -325,7 +325,7 @@ define @intrinsic_vfmerge_vfm_nxv1f32_nxv1f32_f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: @@ -349,7 +349,7 @@ define @intrinsic_vfmerge_vvm_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -373,7 +373,7 @@ define @intrinsic_vfmerge_vfm_nxv2f32_nxv2f32_f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, 
ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: @@ -397,7 +397,7 @@ define @intrinsic_vfmerge_vvm_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0 ; CHECK-NEXT: ret entry: @@ -421,7 +421,7 @@ define @intrinsic_vfmerge_vfm_nxv4f32_nxv4f32_f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: @@ -445,7 +445,7 @@ define @intrinsic_vfmerge_vvm_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0 ; CHECK-NEXT: ret entry: @@ -469,7 +469,7 @@ define @intrinsic_vfmerge_vfm_nxv8f32_nxv8f32_f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: @@ -493,7 +493,7 @@ define @intrinsic_vfmerge_vvm_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0 ; CHECK-NEXT: ret entry: @@ -517,7 +517,7 @@ define @intrinsic_vfmerge_vfm_nxv16f32_nxv16f32_f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: @@ -541,7 +541,7 @@ define @intrinsic_vfmerge_vvm_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -565,7 +565,7 @@ define @intrinsic_vfmerge_vfm_nxv1f64_nxv1f64_f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: @@ -589,7 +589,7 @@ define @intrinsic_vfmerge_vvm_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0 ; CHECK-NEXT: ret entry: @@ -613,7 +613,7 @@ define @intrinsic_vfmerge_vfm_nxv2f64_nxv2f64_f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2f64_nxv2f64_f64: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: @@ -637,7 +637,7 @@ define @intrinsic_vfmerge_vvm_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0 ; CHECK-NEXT: ret entry: @@ -661,7 +661,7 @@ define @intrinsic_vfmerge_vfm_nxv4f64_nxv4f64_f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: @@ -685,7 +685,7 @@ define @intrinsic_vfmerge_vvm_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vvm_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0 ; CHECK-NEXT: ret entry: @@ -709,7 +709,7 @@ define @intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret entry: @@ -726,7 +726,7 @@ define @intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 ; CHECK-NEXT: ret entry: @@ -743,7 +743,7 @@ define @intrinsic_vfmerge_vzm_nxv2f16_nxv2f16_f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 ; CHECK-NEXT: ret entry: @@ -760,7 +760,7 @@ define @intrinsic_vfmerge_vzm_nxv4f16_nxv4f16_f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 ; CHECK-NEXT: ret entry: @@ -777,7 +777,7 @@ define @intrinsic_vfmerge_vzm_nxv8f16_nxv8f16_f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 ; CHECK-NEXT: ret entry: @@ -794,7 +794,7 @@ define @intrinsic_vfmerge_vzm_nxv16f16_nxv16f16_f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vfmerge_vzm_nxv32f16_nxv32f16_f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv32f16_nxv32f16_f16: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 ; CHECK-NEXT: ret entry: @@ -828,7 +828,7 @@ define @intrinsic_vfmerge_vzm_nxv1f32_nxv1f32_f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 ; CHECK-NEXT: ret entry: @@ -845,7 +845,7 @@ define @intrinsic_vfmerge_vzm_nxv2f32_nxv2f32_f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 ; CHECK-NEXT: ret entry: @@ -862,7 +862,7 @@ define @intrinsic_vfmerge_vzm_nxv4f32_nxv4f32_f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 ; CHECK-NEXT: ret entry: @@ -879,7 +879,7 @@ define @intrinsic_vfmerge_vzm_nxv8f32_nxv8f32_f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 ; CHECK-NEXT: ret entry: @@ -896,7 +896,7 @@ define @intrinsic_vfmerge_vzm_nxv16f32_nxv16f32_f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 ; CHECK-NEXT: ret entry: @@ -913,7 +913,7 @@ define @intrinsic_vfmerge_vzm_nxv1f64_nxv1f64_f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 ; CHECK-NEXT: ret entry: @@ -930,7 +930,7 @@ define @intrinsic_vfmerge_vzm_nxv2f64_nxv2f64_f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 ; CHECK-NEXT: ret entry: @@ -947,7 +947,7 @@ define @intrinsic_vfmerge_vzm_nxv4f64_nxv4f64_f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 ; CHECK-NEXT: ret entry: @@ -964,7 +964,7 @@ define @intrinsic_vfmerge_vzm_nxv8f64_nxv8f64_f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmin-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-sdnode.ll @@ -9,7 +9,7 @@ define @vfmin_nxv1f16_vv( 
%a, %b) { ; CHECK-LABEL: vfmin_nxv1f16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.minnum.nxv1f16( %a, %b) @@ -19,7 +19,7 @@ define @vfmin_nxv1f16_vf( %a, half %b) { ; CHECK-LABEL: vfmin_nxv1f16_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -33,7 +33,7 @@ define @vfmin_nxv2f16_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv2f16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.minnum.nxv2f16( %a, %b) @@ -43,7 +43,7 @@ define @vfmin_nxv2f16_vf( %a, half %b) { ; CHECK-LABEL: vfmin_nxv2f16_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -57,7 +57,7 @@ define @vfmin_nxv4f16_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv4f16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.minnum.nxv4f16( %a, %b) @@ -67,7 +67,7 @@ define @vfmin_nxv4f16_vf( %a, half %b) { ; CHECK-LABEL: vfmin_nxv4f16_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -81,7 +81,7 @@ define @vfmin_nxv8f16_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv8f16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call @llvm.minnum.nxv8f16( %a, %b) @@ -91,7 +91,7 @@ define @vfmin_nxv8f16_vf( %a, half %b) { ; CHECK-LABEL: vfmin_nxv8f16_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -105,7 +105,7 @@ define @vfmin_nxv16f16_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv16f16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v12 ; CHECK-NEXT: ret %v = call @llvm.minnum.nxv16f16( %a, %b) @@ -115,7 +115,7 @@ define @vfmin_nxv16f16_vf( %a, half %b) { ; CHECK-LABEL: vfmin_nxv16f16_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -129,7 +129,7 @@ define @vfmin_nxv32f16_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv32f16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v16 ; CHECK-NEXT: ret %v = call @llvm.minnum.nxv32f16( %a, %b) @@ -139,7 +139,7 @@ define @vfmin_nxv32f16_vf( %a, half %b) { ; CHECK-LABEL: vfmin_nxv32f16_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = 
insertelement poison, half %b, i32 0 @@ -153,7 +153,7 @@ define @vfmin_nxv1f32_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv1f32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.minnum.nxv1f32( %a, %b) @@ -163,7 +163,7 @@ define @vfmin_nxv1f32_vf( %a, float %b) { ; CHECK-LABEL: vfmin_nxv1f32_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -177,7 +177,7 @@ define @vfmin_nxv2f32_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv2f32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.minnum.nxv2f32( %a, %b) @@ -187,7 +187,7 @@ define @vfmin_nxv2f32_vf( %a, float %b) { ; CHECK-LABEL: vfmin_nxv2f32_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -201,7 +201,7 @@ define @vfmin_nxv4f32_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv4f32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call @llvm.minnum.nxv4f32( %a, %b) @@ -211,7 +211,7 @@ define @vfmin_nxv4f32_vf( %a, float %b) { ; CHECK-LABEL: vfmin_nxv4f32_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -225,7 +225,7 @@ define @vfmin_nxv8f32_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv8f32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v12 ; CHECK-NEXT: ret %v = call @llvm.minnum.nxv8f32( %a, %b) @@ -235,7 +235,7 @@ define @vfmin_nxv8f32_vf( %a, float %b) { ; CHECK-LABEL: vfmin_nxv8f32_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -249,7 +249,7 @@ define @vfmin_nxv16f32_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv16f32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v16 ; CHECK-NEXT: ret %v = call @llvm.minnum.nxv16f32( %a, %b) @@ -259,7 +259,7 @@ define @vfmin_nxv16f32_vf( %a, float %b) { ; CHECK-LABEL: vfmin_nxv16f32_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -273,7 +273,7 @@ define @vfmin_nxv1f64_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv1f64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.minnum.nxv1f64( %a, %b) @@ -283,7 +283,7 @@ define @vfmin_nxv1f64_vf( %a, double %b) { ; CHECK-LABEL: vfmin_nxv1f64_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; 
CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -297,7 +297,7 @@ define @vfmin_nxv2f64_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv2f64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call @llvm.minnum.nxv2f64( %a, %b) @@ -307,7 +307,7 @@ define @vfmin_nxv2f64_vf( %a, double %b) { ; CHECK-LABEL: vfmin_nxv2f64_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -321,7 +321,7 @@ define @vfmin_nxv4f64_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv4f64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v12 ; CHECK-NEXT: ret %v = call @llvm.minnum.nxv4f64( %a, %b) @@ -331,7 +331,7 @@ define @vfmin_nxv4f64_vf( %a, double %b) { ; CHECK-LABEL: vfmin_nxv4f64_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -345,7 +345,7 @@ define @vfmin_nxv8f64_vv( %a, %b) { ; CHECK-LABEL: vfmin_nxv8f64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v16 ; CHECK-NEXT: ret %v = call @llvm.minnum.nxv8f64( %a, %b) @@ -355,7 +355,7 @@ define @vfmin_nxv8f64_vf( %a, double %b) { ; CHECK-LABEL: vfmin_nxv8f64_vf: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll @@ -19,7 +19,7 @@ define @vfmin_vv_nxv1f16_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -43,7 +43,7 @@ define @vfmin_vv_nxv2f16_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -67,7 +67,7 @@ define @vfmin_vv_nxv4f16_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -91,7 +91,7 @@ define @vfmin_vv_nxv8f16_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -115,7 +115,7 @@ define 
@vfmin_vv_nxv16f16_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -139,7 +139,7 @@ define @vfmin_vv_nxv32f16_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv32f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -163,7 +163,7 @@ define @vfmin_vv_nxv1f32_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -187,7 +187,7 @@ define @vfmin_vv_nxv2f32_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -211,7 +211,7 @@ define @vfmin_vv_nxv4f32_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -235,7 +235,7 @@ define @vfmin_vv_nxv8f32_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -259,7 +259,7 @@ define @vfmin_vv_nxv16f32_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -283,7 +283,7 @@ define @vfmin_vv_nxv1f64_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -307,7 +307,7 @@ define @vfmin_vv_nxv2f64_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -331,7 +331,7 @@ define @vfmin_vv_nxv4f64_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -355,7 +355,7 @@ define @vfmin_vv_nxv8f64_unmasked( %va, %vb, i32 zeroext %evl) { ; CHECK-LABEL: vfmin_vv_nxv8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: 
vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmin.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmin.ll @@ -12,7 +12,7 @@ define @intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -59,7 +59,7 @@ define @intrinsic_vfmin_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -106,7 +106,7 @@ define @intrinsic_vfmin_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -153,7 +153,7 @@ define @intrinsic_vfmin_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -200,7 +200,7 @@ define @intrinsic_vfmin_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -247,7 +247,7 @@ define @intrinsic_vfmin_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -295,7 +295,7 @@ define @intrinsic_vfmin_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -342,7 +342,7 @@ define @intrinsic_vfmin_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -389,7 +389,7 @@ define @intrinsic_vfmin_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -436,7 +436,7 @@ define @intrinsic_vfmin_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, 
iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -483,7 +483,7 @@ define @intrinsic_vfmin_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -531,7 +531,7 @@ define @intrinsic_vfmin_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -578,7 +578,7 @@ define @intrinsic_vfmin_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -625,7 +625,7 @@ define @intrinsic_vfmin_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -672,7 +672,7 @@ define @intrinsic_vfmin_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -720,7 +720,7 @@ define @intrinsic_vfmin_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -767,7 +767,7 @@ define @intrinsic_vfmin_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -814,7 +814,7 @@ define @intrinsic_vfmin_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -861,7 +861,7 @@ define @intrinsic_vfmin_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -908,7 +908,7 @@ define @intrinsic_vfmin_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -955,7 +955,7 @@ define @intrinsic_vfmin_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1002,7 +1002,7 @@ define @intrinsic_vfmin_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1049,7 +1049,7 @@ define @intrinsic_vfmin_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1096,7 +1096,7 @@ define @intrinsic_vfmin_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1143,7 +1143,7 @@ define @intrinsic_vfmin_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1190,7 +1190,7 @@ define @intrinsic_vfmin_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1237,7 +1237,7 @@ define @intrinsic_vfmin_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1284,7 +1284,7 @@ define @intrinsic_vfmin_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1331,7 +1331,7 @@ define @intrinsic_vfmin_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1378,7 +1378,7 @@ define @intrinsic_vfmin_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmin_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, 
ta, ma ; CHECK-NEXT: vfmin.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsac-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsac-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmsac-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsac-vp.ll @@ -27,7 +27,7 @@ define @vmfsac_vv_nxv1f16_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfmsac.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -76,7 +76,7 @@ define @vmfsac_vf_nxv1f16_unmasked( %a, half %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vf_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfmsac.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -162,7 +162,7 @@ define @vmfsac_vv_nxv2f16_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfmsac.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -211,7 +211,7 @@ define @vmfsac_vf_nxv2f16_unmasked( %a, half %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vf_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfmsac.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -297,7 +297,7 @@ define @vmfsac_vv_nxv4f16_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfmsac.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -346,7 +346,7 @@ define @vmfsac_vf_nxv4f16_unmasked( %a, half %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vf_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfmsac.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -432,7 +432,7 @@ define @vmfsac_vv_nxv8f16_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfmsac.vv v12, v8, v10 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -481,7 +481,7 @@ define @vmfsac_vf_nxv8f16_unmasked( %a, half %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vf_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfmsac.vf v10, fa0, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -567,7 +567,7 @@ define @vmfsac_vv_nxv16f16_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfmsac.vv v16, v8, v12 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -616,7 +616,7 @@ define @vmfsac_vf_nxv16f16_unmasked( %a, half %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vf_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: 
vfmsac.vf v12, fa0, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -704,7 +704,7 @@ ; CHECK-LABEL: vmfsac_vv_nxv32f16_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, ma ; CHECK-NEXT: vfmsac.vv v24, v8, v16 ; CHECK-NEXT: vmv8r.v v8, v24 ; CHECK-NEXT: ret @@ -753,7 +753,7 @@ define @vmfsac_vf_nxv32f16_unmasked( %a, half %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vf_nxv32f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vfmsac.vf v16, fa0, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -840,7 +840,7 @@ define @vmfsac_vv_nxv1f32_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfmsac.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -889,7 +889,7 @@ define @vmfsac_vf_nxv1f32_unmasked( %a, float %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vf_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfmsac.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -975,7 +975,7 @@ define @vmfsac_vv_nxv2f32_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfmsac.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1024,7 +1024,7 @@ define @vmfsac_vf_nxv2f32_unmasked( %a, float %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vf_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfmsac.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1110,7 +1110,7 @@ define @vmfsac_vv_nxv4f32_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfmsac.vv v12, v8, v10 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -1159,7 +1159,7 @@ define @vmfsac_vf_nxv4f32_unmasked( %a, float %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vf_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfmsac.vf v10, fa0, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1245,7 +1245,7 @@ define @vmfsac_vv_nxv8f32_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfmsac.vv v16, v8, v12 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -1294,7 +1294,7 @@ define @vmfsac_vf_nxv8f32_unmasked( %a, float %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vf_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfmsac.vf v12, fa0, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1382,7 +1382,7 @@ ; CHECK-LABEL: vmfsac_vv_nxv16f32_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli 
zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, ma ; CHECK-NEXT: vfmsac.vv v24, v8, v16 ; CHECK-NEXT: vmv8r.v v8, v24 ; CHECK-NEXT: ret @@ -1431,7 +1431,7 @@ define @vmfsac_vf_nxv16f32_unmasked( %a, float %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vf_nxv16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vfmsac.vf v16, fa0, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1518,7 +1518,7 @@ define @vmfsac_vv_nxv1f64_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfmsac.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1567,7 +1567,7 @@ define @vmfsac_vf_nxv1f64_unmasked( %a, double %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vf_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfmsac.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1653,7 +1653,7 @@ define @vmfsac_vv_nxv2f64_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfmsac.vv v12, v8, v10 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -1702,7 +1702,7 @@ define @vmfsac_vf_nxv2f64_unmasked( %a, double %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vf_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfmsac.vf v10, fa0, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1788,7 +1788,7 @@ define @vmfsac_vv_nxv4f64_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vv_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfmsac.vv v16, v8, v12 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -1837,7 +1837,7 @@ define @vmfsac_vf_nxv4f64_unmasked( %a, double %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vf_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfmsac.vf v12, fa0, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1925,7 +1925,7 @@ ; CHECK-LABEL: vmfsac_vv_nxv8f64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, ma ; CHECK-NEXT: vfmsac.vv v24, v8, v16 ; CHECK-NEXT: vmv8r.v v8, v24 ; CHECK-NEXT: ret @@ -1974,7 +1974,7 @@ define @vmfsac_vf_nxv8f64_unmasked( %a, double %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vmfsac_vf_nxv8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vfmsac.vf v16, fa0, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsac.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsac.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmsac.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsac.ll @@ -13,7 +13,7 @@ define @intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -60,7 +60,7 @@ define @intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -107,7 +107,7 @@ define @intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -154,7 +154,7 @@ define @intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfmsac.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -201,7 +201,7 @@ define @intrinsic_vfmsac_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfmsac.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -248,7 +248,7 @@ define @intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -295,7 +295,7 @@ define @intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -342,7 +342,7 @@ define @intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfmsac.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -389,7 +389,7 @@ define @intrinsic_vfmsac_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfmsac.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -436,7 +436,7 @@ define @intrinsic_vfmsac_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -483,7 +483,7 @@ define @intrinsic_vfmsac_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfmsac.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -530,7 +530,7 @@ define @intrinsic_vfmsac_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfmsac.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -577,7 +577,7 @@ define @intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -624,7 +624,7 @@ define @intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -671,7 +671,7 @@ define @intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -718,7 +718,7 @@ define @intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: @@ -765,7 +765,7 @@ define @intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: @@ -812,7 +812,7 @@ define @intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: 
vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -1047,7 +1047,7 @@ define @intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: @@ -1094,7 +1094,7 @@ define @intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmsub-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub-sdnode.ll @@ -12,7 +12,7 @@ define @vfmsub_vv_nxv1f16( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v9, v10 ; CHECK-NEXT: ret %neg = fneg %vc @@ -23,7 +23,7 @@ define @vfmsub_vf_nxv1f16( %va, %vb, half %c) { ; CHECK-LABEL: vfmsub_vf_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -38,7 +38,7 @@ define @vfmsub_vv_nxv2f16( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v10, v9 ; CHECK-NEXT: ret %neg = fneg %vb @@ -49,7 +49,7 @@ define @vfmsub_vf_nxv2f16( %va, %vb, half %c) { ; CHECK-LABEL: vfmsub_vf_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -64,7 +64,7 @@ define @vfmsub_vv_nxv4f16( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v9, v10 ; CHECK-NEXT: ret %neg = fneg %vc @@ -75,7 +75,7 @@ define @vfmsub_vf_nxv4f16( %va, %vb, half %c) { ; CHECK-LABEL: vfmsub_vf_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -90,7 +90,7 @@ define @vfmsub_vv_nxv8f16( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfmsac.vv v8, v12, v10 ; CHECK-NEXT: ret %neg = fneg %va @@ -101,7 +101,7 @@ define @vfmsub_vf_nxv8f16( %va, %vb, half %c) { ; CHECK-LABEL: vfmsub_vf_nxv8f16: ; 
CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -116,7 +116,7 @@ define @vfmsub_vv_nxv16f16( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v16, v12 ; CHECK-NEXT: ret %neg = fneg %vb @@ -127,7 +127,7 @@ define @vfmsub_vf_nxv16f16( %va, %vb, half %c) { ; CHECK-LABEL: vfmsub_vf_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -143,7 +143,7 @@ ; CHECK-LABEL: vfmsub_vv_nxv32f16: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfmsac.vv v8, v16, v24 ; CHECK-NEXT: ret %neg = fneg %va @@ -154,7 +154,7 @@ define @vfmsub_vf_nxv32f16( %va, %vb, half %c) { ; CHECK-LABEL: vfmsub_vf_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfmsac.vf v8, fa0, v16 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -169,7 +169,7 @@ define @vfmsub_vv_nxv1f32( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v9, v10 ; CHECK-NEXT: ret %neg = fneg %vc @@ -180,7 +180,7 @@ define @vfmsub_vf_nxv1f32( %va, %vb, float %c) { ; CHECK-LABEL: vfmsub_vf_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -195,7 +195,7 @@ define @vfmsub_vv_nxv2f32( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v10, v9 ; CHECK-NEXT: ret %neg = fneg %vb @@ -206,7 +206,7 @@ define @vfmsub_vf_nxv2f32( %va, %vb, float %c) { ; CHECK-LABEL: vfmsub_vf_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -221,7 +221,7 @@ define @vfmsub_vv_nxv4f32( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v10, v12 ; CHECK-NEXT: ret %neg = fneg %vc @@ -232,7 +232,7 @@ define @vfmsub_vf_nxv4f32( %va, %vb, float %c) { ; CHECK-LABEL: vfmsub_vf_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -247,7 +247,7 @@ define @vfmsub_vv_nxv8f32( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfmsac.vv v8, v16, v12 ; CHECK-NEXT: ret %neg = fneg %va @@ -258,7 +258,7 @@ define 
@vfmsub_vf_nxv8f32( %va, %vb, float %c) { ; CHECK-LABEL: vfmsub_vf_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -274,7 +274,7 @@ ; CHECK-LABEL: vfmsub_vv_nxv16f32: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v24, v16 ; CHECK-NEXT: ret %neg = fneg %vb @@ -285,7 +285,7 @@ define @vfmsub_vf_nxv16f32( %va, %vb, float %c) { ; CHECK-LABEL: vfmsub_vf_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v16 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -300,7 +300,7 @@ define @vfmsub_vv_nxv1f64( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v9, v10 ; CHECK-NEXT: ret %neg = fneg %vc @@ -311,7 +311,7 @@ define @vfmsub_vf_nxv1f64( %va, %vb, double %c) { ; CHECK-LABEL: vfmsub_vf_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, double %c, i32 0 @@ -326,7 +326,7 @@ define @vfmsub_vv_nxv2f64( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v12, v10 ; CHECK-NEXT: ret %neg = fneg %vb @@ -337,7 +337,7 @@ define @vfmsub_vf_nxv2f64( %va, %vb, double %c) { ; CHECK-LABEL: vfmsub_vf_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret %head = insertelement poison, double %c, i32 0 @@ -352,7 +352,7 @@ define @vfmsub_vv_nxv4f64( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v12, v16 ; CHECK-NEXT: ret %neg = fneg %vc @@ -363,7 +363,7 @@ define @vfmsub_vf_nxv4f64( %va, %vb, double %c) { ; CHECK-LABEL: vfmsub_vf_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %head = insertelement poison, double %c, i32 0 @@ -379,7 +379,7 @@ ; CHECK-LABEL: vfmsub_vv_nxv8f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfmsac.vv v8, v16, v24 ; CHECK-NEXT: ret %neg = fneg %va @@ -390,7 +390,7 @@ define @vfmsub_vf_nxv8f64( %va, %vb, double %c) { ; CHECK-LABEL: vfmsub_vf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfmsac.vf v8, fa0, v16 ; CHECK-NEXT: ret %head = insertelement poison, double %c, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmsub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub.ll @@ -13,7 +13,7 @@ define 
@intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfmsub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -60,7 +60,7 @@ define @intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfmsub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -107,7 +107,7 @@ define @intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfmsub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -154,7 +154,7 @@ define @intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfmsub.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -201,7 +201,7 @@ define @intrinsic_vfmsub_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfmsub.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -248,7 +248,7 @@ define @intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfmsub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -295,7 +295,7 @@ define @intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfmsub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -342,7 +342,7 @@ define @intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfmsub.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -389,7 +389,7 @@ define @intrinsic_vfmsub_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfmsub.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -436,7 +436,7 @@ define @intrinsic_vfmsub_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfmsub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -483,7 +483,7 @@ define 
@intrinsic_vfmsub_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfmsub.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -530,7 +530,7 @@ define @intrinsic_vfmsub_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfmsub.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -577,7 +577,7 @@ define @intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -624,7 +624,7 @@ define @intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -671,7 +671,7 @@ define @intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -718,7 +718,7 @@ define @intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: @@ -765,7 +765,7 @@ define @intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: @@ -812,7 +812,7 @@ define @intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define 
@intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vfmsub_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -1047,7 +1047,7 @@ define @intrinsic_vfmsub_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: @@ -1094,7 +1094,7 @@ define @intrinsic_vfmsub_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmul-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-sdnode.ll @@ -7,7 +7,7 @@ define @vfmul_vv_nxv1f16( %va, %vb) { ; CHECK-LABEL: vfmul_vv_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = fmul %va, %vb @@ -17,7 +17,7 @@ define @vfmul_vf_nxv1f16( %va, half %b) { ; CHECK-LABEL: vfmul_vf_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -29,7 +29,7 @@ define @vfmul_vv_nxv2f16( %va, %vb) { ; CHECK-LABEL: vfmul_vv_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = fmul %va, %vb @@ -39,7 +39,7 @@ define @vfmul_vf_nxv2f16( %va, half %b) { ; CHECK-LABEL: vfmul_vf_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -51,7 +51,7 @@ define @vfmul_vv_nxv4f16( %va, %vb) { ; CHECK-LABEL: vfmul_vv_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = fmul %va, %vb @@ -61,7 +61,7 @@ define @vfmul_vf_nxv4f16( %va, half %b) { ; CHECK-LABEL: vfmul_vf_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -73,7 +73,7 @@ define @vfmul_vv_nxv8f16( %va, %vb) { ; CHECK-LABEL: vfmul_vv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; 
CHECK-NEXT: vfmul.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = fmul %va, %vb @@ -83,7 +83,7 @@ define @vfmul_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: vfmul_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -95,7 +95,7 @@ define @vfmul_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: vfmul_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -107,7 +107,7 @@ define @vfmul_vv_nxv16f16( %va, %vb) { ; CHECK-LABEL: vfmul_vv_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = fmul %va, %vb @@ -117,7 +117,7 @@ define @vfmul_vf_nxv16f16( %va, half %b) { ; CHECK-LABEL: vfmul_vf_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -129,7 +129,7 @@ define @vfmul_vv_nxv32f16( %va, %vb) { ; CHECK-LABEL: vfmul_vv_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = fmul %va, %vb @@ -139,7 +139,7 @@ define @vfmul_vf_nxv32f16( %va, half %b) { ; CHECK-LABEL: vfmul_vf_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -151,7 +151,7 @@ define @vfmul_vv_nxv1f32( %va, %vb) { ; CHECK-LABEL: vfmul_vv_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = fmul %va, %vb @@ -161,7 +161,7 @@ define @vfmul_vf_nxv1f32( %va, float %b) { ; CHECK-LABEL: vfmul_vf_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -173,7 +173,7 @@ define @vfmul_vv_nxv2f32( %va, %vb) { ; CHECK-LABEL: vfmul_vv_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = fmul %va, %vb @@ -183,7 +183,7 @@ define @vfmul_vf_nxv2f32( %va, float %b) { ; CHECK-LABEL: vfmul_vf_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -195,7 +195,7 @@ define @vfmul_vv_nxv4f32( %va, %vb) { ; CHECK-LABEL: vfmul_vv_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = fmul %va, %vb @@ -205,7 +205,7 @@ define @vfmul_vf_nxv4f32( %va, float %b) { ; CHECK-LABEL: vfmul_vf_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; 
CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -217,7 +217,7 @@ define @vfmul_vv_nxv8f32( %va, %vb) { ; CHECK-LABEL: vfmul_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = fmul %va, %vb @@ -227,7 +227,7 @@ define @vfmul_vf_nxv8f32( %va, float %b) { ; CHECK-LABEL: vfmul_vf_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -239,7 +239,7 @@ define @vfmul_fv_nxv8f32( %va, float %b) { ; CHECK-LABEL: vfmul_fv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -251,7 +251,7 @@ define @vfmul_vv_nxv16f32( %va, %vb) { ; CHECK-LABEL: vfmul_vv_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = fmul %va, %vb @@ -261,7 +261,7 @@ define @vfmul_vf_nxv16f32( %va, float %b) { ; CHECK-LABEL: vfmul_vf_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -273,7 +273,7 @@ define @vfmul_vv_nxv1f64( %va, %vb) { ; CHECK-LABEL: vfmul_vv_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = fmul %va, %vb @@ -283,7 +283,7 @@ define @vfmul_vf_nxv1f64( %va, double %b) { ; CHECK-LABEL: vfmul_vf_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -295,7 +295,7 @@ define @vfmul_vv_nxv2f64( %va, %vb) { ; CHECK-LABEL: vfmul_vv_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = fmul %va, %vb @@ -305,7 +305,7 @@ define @vfmul_vf_nxv2f64( %va, double %b) { ; CHECK-LABEL: vfmul_vf_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -317,7 +317,7 @@ define @vfmul_vv_nxv4f64( %va, %vb) { ; CHECK-LABEL: vfmul_vv_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = fmul %va, %vb @@ -327,7 +327,7 @@ define @vfmul_vf_nxv4f64( %va, double %b) { ; CHECK-LABEL: vfmul_vf_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -339,7 +339,7 @@ define @vfmul_vv_nxv8f64( %va, %vb) { ; CHECK-LABEL: vfmul_vv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli 
a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = fmul %va, %vb @@ -349,7 +349,7 @@ define @vfmul_vf_nxv8f64( %va, double %b) { ; CHECK-LABEL: vfmul_vf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -361,7 +361,7 @@ define @vfmul_fv_nxv8f64( %va, double %b) { ; CHECK-LABEL: vfmul_fv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll @@ -19,7 +19,7 @@ define @vfmul_vv_nxv1f16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -43,7 +43,7 @@ define @vfmul_vf_nxv1f16_unmasked( %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vf_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -69,7 +69,7 @@ define @vfmul_vv_nxv2f16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -93,7 +93,7 @@ define @vfmul_vf_nxv2f16_unmasked( %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vf_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -119,7 +119,7 @@ define @vfmul_vv_nxv4f16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -143,7 +143,7 @@ define @vfmul_vf_nxv4f16_unmasked( %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vf_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -169,7 +169,7 @@ define @vfmul_vv_nxv8f16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -193,7 +193,7 @@ define @vfmul_vf_nxv8f16_unmasked( %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vf_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, 
m2, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -219,7 +219,7 @@ define @vfmul_vv_nxv16f16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -243,7 +243,7 @@ define @vfmul_vf_nxv16f16_unmasked( %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vf_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -269,7 +269,7 @@ define @vfmul_vv_nxv32f16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_nxv32f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -293,7 +293,7 @@ define @vfmul_vf_nxv32f16_unmasked( %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vf_nxv32f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -319,7 +319,7 @@ define @vfmul_vv_nxv1f32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -343,7 +343,7 @@ define @vfmul_vf_nxv1f32_unmasked( %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vf_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -369,7 +369,7 @@ define @vfmul_vv_nxv2f32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -393,7 +393,7 @@ define @vfmul_vf_nxv2f32_unmasked( %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vf_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -419,7 +419,7 @@ define @vfmul_vv_nxv4f32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -443,7 +443,7 @@ define @vfmul_vf_nxv4f32_unmasked( %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vf_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement 
poison, float %b, i32 0 @@ -469,7 +469,7 @@ define @vfmul_vv_nxv8f32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -493,7 +493,7 @@ define @vfmul_vf_nxv8f32_unmasked( %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vf_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -519,7 +519,7 @@ define @vfmul_vv_nxv16f32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_nxv16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -543,7 +543,7 @@ define @vfmul_vf_nxv16f32_unmasked( %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vf_nxv16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -569,7 +569,7 @@ define @vfmul_vv_nxv1f64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -593,7 +593,7 @@ define @vfmul_vf_nxv1f64_unmasked( %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vf_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -619,7 +619,7 @@ define @vfmul_vv_nxv2f64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -643,7 +643,7 @@ define @vfmul_vf_nxv2f64_unmasked( %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vf_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -669,7 +669,7 @@ define @vfmul_vv_nxv4f64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -693,7 +693,7 @@ define @vfmul_vf_nxv4f64_unmasked( %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vf_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -731,7 +731,7 @@ define @vfmul_vv_nxv8f64_unmasked( %va, %b, 
i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vv_nxv8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -755,7 +755,7 @@ define @vfmul_vf_nxv8f64_unmasked( %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfmul_vf_nxv8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmul.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmul.ll @@ -12,7 +12,7 @@ define @intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -59,7 +59,7 @@ define @intrinsic_vfmul_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -106,7 +106,7 @@ define @intrinsic_vfmul_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -153,7 +153,7 @@ define @intrinsic_vfmul_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -200,7 +200,7 @@ define @intrinsic_vfmul_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -247,7 +247,7 @@ define @intrinsic_vfmul_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -295,7 +295,7 @@ define @intrinsic_vfmul_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -342,7 +342,7 @@ define @intrinsic_vfmul_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v9 ; 
CHECK-NEXT: ret entry: @@ -389,7 +389,7 @@ define @intrinsic_vfmul_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -436,7 +436,7 @@ define @intrinsic_vfmul_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -483,7 +483,7 @@ define @intrinsic_vfmul_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -531,7 +531,7 @@ define @intrinsic_vfmul_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -578,7 +578,7 @@ define @intrinsic_vfmul_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -625,7 +625,7 @@ define @intrinsic_vfmul_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -672,7 +672,7 @@ define @intrinsic_vfmul_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -720,7 +720,7 @@ define @intrinsic_vfmul_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -767,7 +767,7 @@ define @intrinsic_vfmul_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -814,7 +814,7 @@ define @intrinsic_vfmul_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -861,7 +861,7 @@ define @intrinsic_vfmul_vf_nxv8f16_nxv8f16_f16( %0, half %1, 
iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -908,7 +908,7 @@ define @intrinsic_vfmul_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -955,7 +955,7 @@ define @intrinsic_vfmul_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1002,7 +1002,7 @@ define @intrinsic_vfmul_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1049,7 +1049,7 @@ define @intrinsic_vfmul_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1096,7 +1096,7 @@ define @intrinsic_vfmul_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1143,7 +1143,7 @@ define @intrinsic_vfmul_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1190,7 +1190,7 @@ define @intrinsic_vfmul_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1237,7 +1237,7 @@ define @intrinsic_vfmul_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1284,7 +1284,7 @@ define @intrinsic_vfmul_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1331,7 +1331,7 @@ define @intrinsic_vfmul_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: 
# %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1378,7 +1378,7 @@ define @intrinsic_vfmul_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfmul.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll @@ -20,7 +20,7 @@ define @vfma_vv_nxv1f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -56,7 +56,7 @@ define @vfma_vf_nxv1f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -70,7 +70,7 @@ define @vfma_vf_nxv1f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv1f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -97,7 +97,7 @@ define @vfma_vv_nxv2f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -133,7 +133,7 @@ define @vfma_vf_nxv2f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -147,7 +147,7 @@ define @vfma_vf_nxv2f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv2f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -174,7 +174,7 @@ define @vfma_vv_nxv4f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -210,7 +210,7 @@ define @vfma_vf_nxv4f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -224,7 
+224,7 @@ define @vfma_vf_nxv4f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv4f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -251,7 +251,7 @@ define @vfma_vv_nxv8f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -287,7 +287,7 @@ define @vfma_vf_nxv8f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -301,7 +301,7 @@ define @vfma_vf_nxv8f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv8f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -328,7 +328,7 @@ define @vfma_vv_nxv16f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -364,7 +364,7 @@ define @vfma_vf_nxv16f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -378,7 +378,7 @@ define @vfma_vf_nxv16f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv16f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -407,7 +407,7 @@ ; CHECK-LABEL: vfma_vv_nxv32f16_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -443,7 +443,7 @@ define @vfma_vf_nxv32f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv32f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -457,7 +457,7 @@ define @vfma_vf_nxv32f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv32f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -484,7 
+484,7 @@ define @vfma_vv_nxv1f32_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -520,7 +520,7 @@ define @vfma_vf_nxv1f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -534,7 +534,7 @@ define @vfma_vf_nxv1f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv1f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -561,7 +561,7 @@ define @vfma_vv_nxv2f32_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -597,7 +597,7 @@ define @vfma_vf_nxv2f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -611,7 +611,7 @@ define @vfma_vf_nxv2f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv2f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -638,7 +638,7 @@ define @vfma_vv_nxv4f32_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -674,7 +674,7 @@ define @vfma_vf_nxv4f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -688,7 +688,7 @@ define @vfma_vf_nxv4f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv4f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -715,7 +715,7 @@ define @vfma_vv_nxv8f32_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ 
-751,7 +751,7 @@ define @vfma_vf_nxv8f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -765,7 +765,7 @@ define @vfma_vf_nxv8f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv8f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -794,7 +794,7 @@ ; CHECK-LABEL: vfma_vv_nxv16f32_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -830,7 +830,7 @@ define @vfma_vf_nxv16f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -844,7 +844,7 @@ define @vfma_vf_nxv16f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv16f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -871,7 +871,7 @@ define @vfma_vv_nxv1f64_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -907,7 +907,7 @@ define @vfma_vf_nxv1f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -921,7 +921,7 @@ define @vfma_vf_nxv1f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv1f64_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -948,7 +948,7 @@ define @vfma_vv_nxv2f64_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -984,7 +984,7 @@ define @vfma_vf_nxv2f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -998,7 
+998,7 @@ define @vfma_vf_nxv2f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv2f64_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -1025,7 +1025,7 @@ define @vfma_vv_nxv4f64_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vv_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1061,7 +1061,7 @@ define @vfma_vf_nxv4f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -1075,7 +1075,7 @@ define @vfma_vf_nxv4f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv4f64_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -1104,7 +1104,7 @@ ; CHECK-LABEL: vfma_vv_nxv7f64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1131,7 +1131,7 @@ ; CHECK-LABEL: vfma_vv_nxv8f64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vfmadd.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1167,7 +1167,7 @@ define @vfma_vf_nxv8f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -1181,7 +1181,7 @@ define @vfma_vf_nxv8f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfma_vf_nxv8f64_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -1217,7 +1217,7 @@ ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: li a3, 0 ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: vsetvli a5, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a5, zero, e8, mf4, ta, ma ; CHECK-NEXT: slli a5, a1, 3 ; CHECK-NEXT: add a6, a2, a5 ; CHECK-NEXT: vl8re64.v v8, (a6) @@ -1345,7 +1345,7 @@ ; CHECK-NEXT: addi a2, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill ; CHECK-NEXT: vl8re64.v v0, (a0) -; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 ; CHECK-NEXT: add a0, sp, a0 @@ -1356,7 +1356,7 @@ ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: mv a4, a1 ; CHECK-NEXT: .LBB93_4: -; 
CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add a0, sp, a0 @@ -1396,7 +1396,7 @@ define @vfmsub_vv_nxv1f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1435,7 +1435,7 @@ define @vfmsub_vf_nxv1f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -1450,7 +1450,7 @@ define @vfmsub_vf_nxv1f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv1f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -1490,7 +1490,7 @@ define @vfnmadd_vv_nxv1f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1504,7 +1504,7 @@ define @vfnmadd_vv_nxv1f16_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv1f16_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1546,7 +1546,7 @@ define @vfnmadd_vf_nxv1f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -1562,7 +1562,7 @@ define @vfnmadd_vf_nxv1f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv1f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -1606,7 +1606,7 @@ define @vfnmadd_vf_nxv1f16_neg_splat_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv1f16_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -1622,7 +1622,7 @@ define @vfnmadd_vf_nxv1f16_neg_splat_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv1f16_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -1663,7 +1663,7 @@ define 
@vfnmsub_vv_nxv1f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1677,7 +1677,7 @@ define @vfnmsub_vv_nxv1f16_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv1f16_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1717,7 +1717,7 @@ define @vfnmsub_vf_nxv1f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -1732,7 +1732,7 @@ define @vfnmsub_vf_nxv1f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv1f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -1773,7 +1773,7 @@ define @vfnmsub_vf_nxv1f16_neg_splat_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv1f16_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -1788,7 +1788,7 @@ define @vfnmsub_vf_nxv1f16_neg_splat_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv1f16_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -1817,7 +1817,7 @@ define @vfmsub_vv_nxv2f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1856,7 +1856,7 @@ define @vfmsub_vf_nxv2f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -1871,7 +1871,7 @@ define @vfmsub_vf_nxv2f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv2f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -1911,7 +1911,7 @@ define @vfnmadd_vv_nxv2f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, 
e16, mf2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1925,7 +1925,7 @@ define @vfnmadd_vv_nxv2f16_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv2f16_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1967,7 +1967,7 @@ define @vfnmadd_vf_nxv2f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -1983,7 +1983,7 @@ define @vfnmadd_vf_nxv2f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv2f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2027,7 +2027,7 @@ define @vfnmadd_vf_nxv2f16_neg_splat_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv2f16_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2043,7 +2043,7 @@ define @vfnmadd_vf_nxv2f16_neg_splat_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv2f16_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2084,7 +2084,7 @@ define @vfnmsub_vv_nxv2f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -2098,7 +2098,7 @@ define @vfnmsub_vv_nxv2f16_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv2f16_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -2138,7 +2138,7 @@ define @vfnmsub_vf_nxv2f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2153,7 +2153,7 @@ define @vfnmsub_vf_nxv2f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv2f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2194,7 +2194,7 @@ define 
@vfnmsub_vf_nxv2f16_neg_splat_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv2f16_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2209,7 +2209,7 @@ define @vfnmsub_vf_nxv2f16_neg_splat_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv2f16_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2238,7 +2238,7 @@ define @vfmsub_vv_nxv4f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -2277,7 +2277,7 @@ define @vfmsub_vf_nxv4f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2292,7 +2292,7 @@ define @vfmsub_vf_nxv4f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv4f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2332,7 +2332,7 @@ define @vfnmadd_vv_nxv4f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -2346,7 +2346,7 @@ define @vfnmadd_vv_nxv4f16_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv4f16_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -2388,7 +2388,7 @@ define @vfnmadd_vf_nxv4f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2404,7 +2404,7 @@ define @vfnmadd_vf_nxv4f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv4f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2448,7 +2448,7 @@ define @vfnmadd_vf_nxv4f16_neg_splat_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv4f16_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli 
zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2464,7 +2464,7 @@ define @vfnmadd_vf_nxv4f16_neg_splat_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv4f16_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2505,7 +2505,7 @@ define @vfnmsub_vv_nxv4f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -2519,7 +2519,7 @@ define @vfnmsub_vv_nxv4f16_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv4f16_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -2559,7 +2559,7 @@ define @vfnmsub_vf_nxv4f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2574,7 +2574,7 @@ define @vfnmsub_vf_nxv4f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv4f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2615,7 +2615,7 @@ define @vfnmsub_vf_nxv4f16_neg_splat_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv4f16_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2630,7 +2630,7 @@ define @vfnmsub_vf_nxv4f16_neg_splat_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv4f16_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2659,7 +2659,7 @@ define @vfmsub_vv_nxv8f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -2698,7 +2698,7 @@ define @vfmsub_vf_nxv8f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2713,7 +2713,7 @@ define @vfmsub_vf_nxv8f16_unmasked_commute( %va, 
half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv8f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2753,7 +2753,7 @@ define @vfnmadd_vv_nxv8f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -2767,7 +2767,7 @@ define @vfnmadd_vv_nxv8f16_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv8f16_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -2809,7 +2809,7 @@ define @vfnmadd_vf_nxv8f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2825,7 +2825,7 @@ define @vfnmadd_vf_nxv8f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv8f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2869,7 +2869,7 @@ define @vfnmadd_vf_nxv8f16_neg_splat_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv8f16_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2885,7 +2885,7 @@ define @vfnmadd_vf_nxv8f16_neg_splat_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv8f16_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2926,7 +2926,7 @@ define @vfnmsub_vv_nxv8f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -2940,7 +2940,7 @@ define @vfnmsub_vv_nxv8f16_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv8f16_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -2980,7 +2980,7 @@ define @vfnmsub_vf_nxv8f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: 
vfnmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -2995,7 +2995,7 @@ define @vfnmsub_vf_nxv8f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv8f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3036,7 +3036,7 @@ define @vfnmsub_vf_nxv8f16_neg_splat_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv8f16_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3051,7 +3051,7 @@ define @vfnmsub_vf_nxv8f16_neg_splat_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv8f16_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3080,7 +3080,7 @@ define @vfmsub_vv_nxv16f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -3119,7 +3119,7 @@ define @vfmsub_vf_nxv16f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3134,7 +3134,7 @@ define @vfmsub_vf_nxv16f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv16f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3174,7 +3174,7 @@ define @vfnmadd_vv_nxv16f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -3188,7 +3188,7 @@ define @vfnmadd_vv_nxv16f16_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv16f16_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -3230,7 +3230,7 @@ define @vfnmadd_vf_nxv16f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3246,7 +3246,7 @@ define @vfnmadd_vf_nxv16f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; 
CHECK-LABEL: vfnmadd_vf_nxv16f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3290,7 +3290,7 @@ define @vfnmadd_vf_nxv16f16_neg_splat_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv16f16_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3306,7 +3306,7 @@ define @vfnmadd_vf_nxv16f16_neg_splat_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv16f16_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3347,7 +3347,7 @@ define @vfnmsub_vv_nxv16f16_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -3361,7 +3361,7 @@ define @vfnmsub_vv_nxv16f16_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv16f16_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -3401,7 +3401,7 @@ define @vfnmsub_vf_nxv16f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3416,7 +3416,7 @@ define @vfnmsub_vf_nxv16f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv16f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3457,7 +3457,7 @@ define @vfnmsub_vf_nxv16f16_neg_splat_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv16f16_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3472,7 +3472,7 @@ define @vfnmsub_vf_nxv16f16_neg_splat_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv16f16_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3503,7 +3503,7 @@ ; CHECK-LABEL: vfmsub_vv_nxv32f16_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: 
vfmsub.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -3542,7 +3542,7 @@ define @vfmsub_vf_nxv32f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv32f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3557,7 +3557,7 @@ define @vfmsub_vf_nxv32f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv32f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3600,7 +3600,7 @@ ; CHECK-LABEL: vfnmadd_vv_nxv32f16_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -3615,7 +3615,7 @@ ; CHECK-LABEL: vfnmadd_vv_nxv32f16_unmasked_commuted: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -3657,7 +3657,7 @@ define @vfnmadd_vf_nxv32f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv32f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3673,7 +3673,7 @@ define @vfnmadd_vf_nxv32f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv32f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3717,7 +3717,7 @@ define @vfnmadd_vf_nxv32f16_neg_splat_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv32f16_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3733,7 +3733,7 @@ define @vfnmadd_vf_nxv32f16_neg_splat_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv32f16_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3777,7 +3777,7 @@ ; CHECK-LABEL: vfnmsub_vv_nxv32f16_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -3792,7 +3792,7 @@ ; CHECK-LABEL: vfnmsub_vv_nxv32f16_unmasked_commuted: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; 
CHECK-NEXT: vfnmadd.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -3832,7 +3832,7 @@ define @vfnmsub_vf_nxv32f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv32f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3847,7 +3847,7 @@ define @vfnmsub_vf_nxv32f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv32f16_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3888,7 +3888,7 @@ define @vfnmsub_vf_nxv32f16_neg_splat_unmasked( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv32f16_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3903,7 +3903,7 @@ define @vfnmsub_vf_nxv32f16_neg_splat_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv32f16_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -3932,7 +3932,7 @@ define @vfmsub_vv_nxv1f32_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -3971,7 +3971,7 @@ define @vfmsub_vf_nxv1f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -3986,7 +3986,7 @@ define @vfmsub_vf_nxv1f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv1f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4026,7 +4026,7 @@ define @vfnmadd_vv_nxv1f32_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -4040,7 +4040,7 @@ define @vfnmadd_vv_nxv1f32_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv1f32_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -4082,7 +4082,7 @@ define @vfnmadd_vf_nxv1f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; 
CHECK-LABEL: vfnmadd_vf_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4098,7 +4098,7 @@ define @vfnmadd_vf_nxv1f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv1f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4142,7 +4142,7 @@ define @vfnmadd_vf_nxv1f32_neg_splat_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv1f32_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4158,7 +4158,7 @@ define @vfnmadd_vf_nxv1f32_neg_splat_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv1f32_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4199,7 +4199,7 @@ define @vfnmsub_vv_nxv1f32_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -4213,7 +4213,7 @@ define @vfnmsub_vv_nxv1f32_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv1f32_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -4253,7 +4253,7 @@ define @vfnmsub_vf_nxv1f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4268,7 +4268,7 @@ define @vfnmsub_vf_nxv1f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv1f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4309,7 +4309,7 @@ define @vfnmsub_vf_nxv1f32_neg_splat_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv1f32_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4324,7 +4324,7 @@ define @vfnmsub_vf_nxv1f32_neg_splat_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv1f32_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; 
CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4353,7 +4353,7 @@ define @vfmsub_vv_nxv2f32_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -4392,7 +4392,7 @@ define @vfmsub_vf_nxv2f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4407,7 +4407,7 @@ define @vfmsub_vf_nxv2f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv2f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4447,7 +4447,7 @@ define @vfnmadd_vv_nxv2f32_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -4461,7 +4461,7 @@ define @vfnmadd_vv_nxv2f32_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv2f32_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -4503,7 +4503,7 @@ define @vfnmadd_vf_nxv2f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4519,7 +4519,7 @@ define @vfnmadd_vf_nxv2f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv2f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4563,7 +4563,7 @@ define @vfnmadd_vf_nxv2f32_neg_splat_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv2f32_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4579,7 +4579,7 @@ define @vfnmadd_vf_nxv2f32_neg_splat_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv2f32_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4620,7 +4620,7 @@ define @vfnmsub_vv_nxv2f32_unmasked( %va, 
%b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -4634,7 +4634,7 @@ define @vfnmsub_vv_nxv2f32_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv2f32_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -4674,7 +4674,7 @@ define @vfnmsub_vf_nxv2f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4689,7 +4689,7 @@ define @vfnmsub_vf_nxv2f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv2f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4730,7 +4730,7 @@ define @vfnmsub_vf_nxv2f32_neg_splat_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv2f32_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4745,7 +4745,7 @@ define @vfnmsub_vf_nxv2f32_neg_splat_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv2f32_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4774,7 +4774,7 @@ define @vfmsub_vv_nxv4f32_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -4813,7 +4813,7 @@ define @vfmsub_vf_nxv4f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4828,7 +4828,7 @@ define @vfmsub_vf_nxv4f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv4f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4868,7 +4868,7 @@ define @vfnmadd_vv_nxv4f32_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfnmadd.vv 
v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -4882,7 +4882,7 @@ define @vfnmadd_vv_nxv4f32_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv4f32_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -4924,7 +4924,7 @@ define @vfnmadd_vf_nxv4f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4940,7 +4940,7 @@ define @vfnmadd_vf_nxv4f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv4f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -4984,7 +4984,7 @@ define @vfnmadd_vf_nxv4f32_neg_splat_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv4f32_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5000,7 +5000,7 @@ define @vfnmadd_vf_nxv4f32_neg_splat_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv4f32_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5041,7 +5041,7 @@ define @vfnmsub_vv_nxv4f32_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -5055,7 +5055,7 @@ define @vfnmsub_vv_nxv4f32_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv4f32_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -5095,7 +5095,7 @@ define @vfnmsub_vf_nxv4f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5110,7 +5110,7 @@ define @vfnmsub_vf_nxv4f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv4f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5151,7 +5151,7 @@ define @vfnmsub_vf_nxv4f32_neg_splat_unmasked( %va, float %b, %vc, i32 zeroext 
%evl) { ; CHECK-LABEL: vfnmsub_vf_nxv4f32_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5166,7 +5166,7 @@ define @vfnmsub_vf_nxv4f32_neg_splat_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv4f32_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5195,7 +5195,7 @@ define @vfmsub_vv_nxv8f32_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -5234,7 +5234,7 @@ define @vfmsub_vf_nxv8f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5249,7 +5249,7 @@ define @vfmsub_vf_nxv8f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv8f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5289,7 +5289,7 @@ define @vfnmadd_vv_nxv8f32_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -5303,7 +5303,7 @@ define @vfnmadd_vv_nxv8f32_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv8f32_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -5345,7 +5345,7 @@ define @vfnmadd_vf_nxv8f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5361,7 +5361,7 @@ define @vfnmadd_vf_nxv8f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv8f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5405,7 +5405,7 @@ define @vfnmadd_vf_nxv8f32_neg_splat_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv8f32_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, 
fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5421,7 +5421,7 @@ define @vfnmadd_vf_nxv8f32_neg_splat_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv8f32_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5462,7 +5462,7 @@ define @vfnmsub_vv_nxv8f32_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -5476,7 +5476,7 @@ define @vfnmsub_vv_nxv8f32_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv8f32_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -5516,7 +5516,7 @@ define @vfnmsub_vf_nxv8f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5531,7 +5531,7 @@ define @vfnmsub_vf_nxv8f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv8f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5572,7 +5572,7 @@ define @vfnmsub_vf_nxv8f32_neg_splat_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv8f32_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5587,7 +5587,7 @@ define @vfnmsub_vf_nxv8f32_neg_splat_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv8f32_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5618,7 +5618,7 @@ ; CHECK-LABEL: vfmsub_vv_nxv16f32_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -5657,7 +5657,7 @@ define @vfmsub_vf_nxv16f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5672,7 +5672,7 @@ define @vfmsub_vf_nxv16f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: 
vfmsub_vf_nxv16f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5715,7 +5715,7 @@ ; CHECK-LABEL: vfnmadd_vv_nxv16f32_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -5730,7 +5730,7 @@ ; CHECK-LABEL: vfnmadd_vv_nxv16f32_unmasked_commuted: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -5772,7 +5772,7 @@ define @vfnmadd_vf_nxv16f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5788,7 +5788,7 @@ define @vfnmadd_vf_nxv16f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv16f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5832,7 +5832,7 @@ define @vfnmadd_vf_nxv16f32_neg_splat_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv16f32_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5848,7 +5848,7 @@ define @vfnmadd_vf_nxv16f32_neg_splat_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv16f32_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5892,7 +5892,7 @@ ; CHECK-LABEL: vfnmsub_vv_nxv16f32_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -5907,7 +5907,7 @@ ; CHECK-LABEL: vfnmsub_vv_nxv16f32_unmasked_commuted: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -5947,7 +5947,7 @@ define @vfnmsub_vf_nxv16f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -5962,7 +5962,7 @@ define @vfnmsub_vf_nxv16f32_unmasked_commute( %va, float %b, %vc, i32 zeroext 
%evl) { ; CHECK-LABEL: vfnmsub_vf_nxv16f32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -6003,7 +6003,7 @@ define @vfnmsub_vf_nxv16f32_neg_splat_unmasked( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv16f32_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -6018,7 +6018,7 @@ define @vfnmsub_vf_nxv16f32_neg_splat_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv16f32_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -6047,7 +6047,7 @@ define @vfmsub_vv_nxv1f64_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -6086,7 +6086,7 @@ define @vfmsub_vf_nxv1f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6101,7 +6101,7 @@ define @vfmsub_vf_nxv1f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv1f64_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6141,7 +6141,7 @@ define @vfnmadd_vv_nxv1f64_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -6155,7 +6155,7 @@ define @vfnmadd_vv_nxv1f64_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv1f64_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -6197,7 +6197,7 @@ define @vfnmadd_vf_nxv1f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6213,7 +6213,7 @@ define @vfnmadd_vf_nxv1f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv1f64_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfnmadd.vf 
v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6257,7 +6257,7 @@ define @vfnmadd_vf_nxv1f64_neg_splat_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv1f64_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6273,7 +6273,7 @@ define @vfnmadd_vf_nxv1f64_neg_splat_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv1f64_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6314,7 +6314,7 @@ define @vfnmsub_vv_nxv1f64_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -6328,7 +6328,7 @@ define @vfnmsub_vv_nxv1f64_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv1f64_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -6368,7 +6368,7 @@ define @vfnmsub_vf_nxv1f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6383,7 +6383,7 @@ define @vfnmsub_vf_nxv1f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv1f64_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6424,7 +6424,7 @@ define @vfnmsub_vf_nxv1f64_neg_splat_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv1f64_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6439,7 +6439,7 @@ define @vfnmsub_vf_nxv1f64_neg_splat_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv1f64_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6468,7 +6468,7 @@ define @vfmsub_vv_nxv2f64_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -6507,7 +6507,7 @@ define @vfmsub_vf_nxv2f64_unmasked( %va, double %b, 
%vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6522,7 +6522,7 @@ define @vfmsub_vf_nxv2f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv2f64_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6562,7 +6562,7 @@ define @vfnmadd_vv_nxv2f64_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -6576,7 +6576,7 @@ define @vfnmadd_vv_nxv2f64_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv2f64_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -6618,7 +6618,7 @@ define @vfnmadd_vf_nxv2f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6634,7 +6634,7 @@ define @vfnmadd_vf_nxv2f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv2f64_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6678,7 +6678,7 @@ define @vfnmadd_vf_nxv2f64_neg_splat_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv2f64_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6694,7 +6694,7 @@ define @vfnmadd_vf_nxv2f64_neg_splat_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv2f64_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6735,7 +6735,7 @@ define @vfnmsub_vv_nxv2f64_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -6749,7 +6749,7 @@ define @vfnmsub_vv_nxv2f64_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv2f64_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, 
ma ; CHECK-NEXT: vfnmadd.vv v8, v10, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -6789,7 +6789,7 @@ define @vfnmsub_vf_nxv2f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6804,7 +6804,7 @@ define @vfnmsub_vf_nxv2f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv2f64_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6845,7 +6845,7 @@ define @vfnmsub_vf_nxv2f64_neg_splat_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv2f64_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6860,7 +6860,7 @@ define @vfnmsub_vf_nxv2f64_neg_splat_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv2f64_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6889,7 +6889,7 @@ define @vfmsub_vv_nxv4f64_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vv_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -6928,7 +6928,7 @@ define @vfmsub_vf_nxv4f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6943,7 +6943,7 @@ define @vfmsub_vf_nxv4f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv4f64_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -6983,7 +6983,7 @@ define @vfnmadd_vv_nxv4f64_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -6997,7 +6997,7 @@ define @vfnmadd_vv_nxv4f64_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vv_nxv4f64_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -7039,7 +7039,7 @@ define @vfnmadd_vf_nxv4f64_unmasked( %va, double %b, %vc, i32 zeroext 
%evl) { ; CHECK-LABEL: vfnmadd_vf_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -7055,7 +7055,7 @@ define @vfnmadd_vf_nxv4f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv4f64_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -7099,7 +7099,7 @@ define @vfnmadd_vf_nxv4f64_neg_splat_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv4f64_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -7115,7 +7115,7 @@ define @vfnmadd_vf_nxv4f64_neg_splat_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv4f64_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -7156,7 +7156,7 @@ define @vfnmsub_vv_nxv4f64_unmasked( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -7170,7 +7170,7 @@ define @vfnmsub_vv_nxv4f64_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vv_nxv4f64_unmasked_commuted: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v12, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -7210,7 +7210,7 @@ define @vfnmsub_vf_nxv4f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -7225,7 +7225,7 @@ define @vfnmsub_vf_nxv4f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv4f64_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -7266,7 +7266,7 @@ define @vfnmsub_vf_nxv4f64_neg_splat_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv4f64_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -7281,7 +7281,7 @@ define @vfnmsub_vf_nxv4f64_neg_splat_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv4f64_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, 
mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -7312,7 +7312,7 @@ ; CHECK-LABEL: vfmsub_vv_nxv8f64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vfmsub.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -7351,7 +7351,7 @@ define @vfmsub_vf_nxv8f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -7366,7 +7366,7 @@ define @vfmsub_vf_nxv8f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfmsub_vf_nxv8f64_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfmsub.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -7409,7 +7409,7 @@ ; CHECK-LABEL: vfnmadd_vv_nxv8f64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -7424,7 +7424,7 @@ ; CHECK-LABEL: vfnmadd_vv_nxv8f64_unmasked_commuted: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -7466,7 +7466,7 @@ define @vfnmadd_vf_nxv8f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -7482,7 +7482,7 @@ define @vfnmadd_vf_nxv8f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv8f64_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -7526,7 +7526,7 @@ define @vfnmadd_vf_nxv8f64_neg_splat_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv8f64_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -7542,7 +7542,7 @@ define @vfnmadd_vf_nxv8f64_neg_splat_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmadd_vf_nxv8f64_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -7586,7 +7586,7 @@ ; CHECK-LABEL: vfnmsub_vv_nxv8f64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, 
e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -7601,7 +7601,7 @@ ; CHECK-LABEL: vfnmsub_vv_nxv8f64_unmasked_commuted: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v16, v24 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -7641,7 +7641,7 @@ define @vfnmsub_vf_nxv8f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -7656,7 +7656,7 @@ define @vfnmsub_vf_nxv8f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv8f64_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -7697,7 +7697,7 @@ define @vfnmsub_vf_nxv8f64_neg_splat_unmasked( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv8f64_neg_splat_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -7712,7 +7712,7 @@ define @vfnmsub_vf_nxv8f64_neg_splat_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsub_vf_nxv8f64_neg_splat_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll @@ -7,7 +7,7 @@ define half @intrinsic_vfmv.f.s_s_nxv1f16( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret entry: @@ -20,7 +20,7 @@ define half @intrinsic_vfmv.f.s_s_nxv2f16( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret entry: @@ -33,7 +33,7 @@ define half @intrinsic_vfmv.f.s_s_nxv4f16( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret entry: @@ -46,7 +46,7 @@ define half @intrinsic_vfmv.f.s_s_nxv8f16( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m2, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret entry: @@ -59,7 +59,7 @@ define half @intrinsic_vfmv.f.s_s_nxv16f16( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv16f16: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret entry: @@ -72,7 +72,7 @@ define half @intrinsic_vfmv.f.s_s_nxv32f16( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e16, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m8, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret entry: @@ -85,7 +85,7 @@ define float @intrinsic_vfmv.f.s_s_nxv1f32( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret entry: @@ -98,7 +98,7 @@ define float @intrinsic_vfmv.f.s_s_nxv2f32( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret entry: @@ -111,7 +111,7 @@ define float @intrinsic_vfmv.f.s_s_nxv4f32( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret entry: @@ -124,7 +124,7 @@ define float @intrinsic_vfmv.f.s_s_nxv8f32( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m4, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret entry: @@ -137,7 +137,7 @@ define float @intrinsic_vfmv.f.s_s_nxv16f32( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret entry: @@ -150,7 +150,7 @@ define double @intrinsic_vfmv.f.s_s_nxv1f64( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret entry: @@ -163,7 +163,7 @@ define double @intrinsic_vfmv.f.s_s_nxv2f64( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m2, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret entry: @@ -176,7 +176,7 @@ define double @intrinsic_vfmv.f.s_s_nxv4f64( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m4, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret entry: @@ -189,7 +189,7 @@ define double @intrinsic_vfmv.f.s_s_nxv8f64( %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m8, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f.ll @@ -9,7 +9,7 @@ define @intrinsic_vfmv.s.f_f_nxv1f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f16: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: @@ -22,7 +22,7 @@ define @intrinsic_vfmv.s.f_f_nxv2f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: @@ -35,7 +35,7 @@ define @intrinsic_vfmv.s.f_f_nxv4f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: @@ -48,7 +48,7 @@ define @intrinsic_vfmv.s.f_f_nxv8f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: @@ -61,7 +61,7 @@ define @intrinsic_vfmv.s.f_f_nxv16f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: @@ -74,7 +74,7 @@ define @intrinsic_vfmv.s.f_f_nxv32f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: @@ -87,7 +87,7 @@ define @intrinsic_vfmv.s.f_f_nxv1f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: @@ -100,7 +100,7 @@ define @intrinsic_vfmv.s.f_f_nxv2f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: @@ -113,7 +113,7 @@ define @intrinsic_vfmv.s.f_f_nxv4f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: @@ -126,7 +126,7 @@ define @intrinsic_vfmv.s.f_f_nxv8f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: @@ -139,7 +139,7 @@ define @intrinsic_vfmv.s.f_f_nxv16f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: @@ -152,7 +152,7 @@ define @intrinsic_vfmv.s.f_f_nxv1f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: @@ -165,7 +165,7 @@ define @intrinsic_vfmv.s.f_f_nxv2f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: @@ -178,7 +178,7 @@ define @intrinsic_vfmv.s.f_f_nxv4f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: @@ -191,7 +191,7 @@ define @intrinsic_vfmv.s.f_f_nxv8f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: @@ -202,7 +202,7 @@ define @intrinsic_vfmv.s.f_f_zero_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vmv.s.x v8, zero ; CHECK-NEXT: ret entry: @@ -213,7 +213,7 @@ define @intrinsic_vfmv.s.f_f_zero_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vmv.s.x v8, zero ; CHECK-NEXT: ret entry: @@ -224,7 +224,7 @@ define @intrinsic_vfmv.s.f_f_zero_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vmv.s.x v8, zero ; CHECK-NEXT: ret entry: @@ -235,7 +235,7 @@ define @intrinsic_vfmv.s.f_f_zero_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vmv.s.x v8, zero ; CHECK-NEXT: ret entry: @@ -246,7 +246,7 @@ define @intrinsic_vfmv.s.f_f_zero_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vmv.s.x v8, zero ; CHECK-NEXT: ret entry: @@ -257,7 +257,7 @@ define @intrinsic_vfmv.s.f_f_zero_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vmv.s.x v8, zero ; CHECK-NEXT: ret entry: @@ -268,7 +268,7 @@ define @intrinsic_vfmv.s.f_f_zero_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vmv.s.x v8, zero ; CHECK-NEXT: ret entry: @@ -279,7 +279,7 @@ define @intrinsic_vfmv.s.f_f_zero_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; 
CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vmv.s.x v8, zero ; CHECK-NEXT: ret entry: @@ -290,7 +290,7 @@ define @intrinsic_vfmv.s.f_f_zero_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vmv.s.x v8, zero ; CHECK-NEXT: ret entry: @@ -301,7 +301,7 @@ define @intrinsic_vfmv.s.f_f_zero_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vmv.s.x v8, zero ; CHECK-NEXT: ret entry: @@ -312,7 +312,7 @@ define @intrinsic_vfmv.s.f_f_zero_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vmv.s.x v8, zero ; CHECK-NEXT: ret entry: @@ -323,7 +323,7 @@ define @intrinsic_vfmv.s.f_f_zero_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vmv.s.x v8, zero ; CHECK-NEXT: ret entry: @@ -334,7 +334,7 @@ define @intrinsic_vfmv.s.f_f_zero_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vmv.s.x v8, zero ; CHECK-NEXT: ret entry: @@ -345,7 +345,7 @@ define @intrinsic_vfmv.s.f_f_zero_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vmv.s.x v8, zero ; CHECK-NEXT: ret entry: @@ -356,7 +356,7 @@ define @intrinsic_vfmv.s.f_f_zero_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vmv.s.x v8, zero ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f.ll @@ -11,7 +11,7 @@ define @intrinsic_vfmv.v.f_f_nxv1f16(half %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfmv.v.f v8, fa0 ; CHECK-NEXT: ret entry: @@ -31,7 +31,7 @@ define @intrinsic_vfmv.v.f_f_nxv2f16(half %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfmv.v.f v8, fa0 ; CHECK-NEXT: ret entry: @@ -51,7 +51,7 @@ define @intrinsic_vfmv.v.f_f_nxv4f16(half %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfmv.v.f v8, fa0 ; CHECK-NEXT: ret entry: @@ -71,7 +71,7 @@ define @intrinsic_vfmv.v.f_f_nxv8f16(half %0, iXLen %1) nounwind { ; 
CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfmv.v.f v8, fa0 ; CHECK-NEXT: ret entry: @@ -91,7 +91,7 @@ define @intrinsic_vfmv.v.f_f_nxv16f16(half %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfmv.v.f v8, fa0 ; CHECK-NEXT: ret entry: @@ -111,7 +111,7 @@ define @intrinsic_vfmv.v.f_f_nxv32f16(half %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfmv.v.f v8, fa0 ; CHECK-NEXT: ret entry: @@ -131,7 +131,7 @@ define @intrinsic_vfmv.v.f_f_nxv1f32(float %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfmv.v.f v8, fa0 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vfmv.v.f_f_nxv2f32(float %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfmv.v.f v8, fa0 ; CHECK-NEXT: ret entry: @@ -171,7 +171,7 @@ define @intrinsic_vfmv.v.f_f_nxv4f32(float %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfmv.v.f v8, fa0 ; CHECK-NEXT: ret entry: @@ -191,7 +191,7 @@ define @intrinsic_vfmv.v.f_f_nxv8f32(float %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfmv.v.f v8, fa0 ; CHECK-NEXT: ret entry: @@ -211,7 +211,7 @@ define @intrinsic_vfmv.v.f_f_nxv16f32(float %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfmv.v.f v8, fa0 ; CHECK-NEXT: ret entry: @@ -231,7 +231,7 @@ define @intrinsic_vfmv.v.f_f_nxv1f64(double %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfmv.v.f v8, fa0 ; CHECK-NEXT: ret entry: @@ -251,7 +251,7 @@ define @intrinsic_vfmv.v.f_f_nxv2f64(double %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfmv.v.f v8, fa0 ; CHECK-NEXT: ret entry: @@ -271,7 +271,7 @@ define @intrinsic_vfmv.v.f_f_nxv4f64(double %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfmv.v.f v8, fa0 ; CHECK-NEXT: ret entry: @@ -291,7 +291,7 @@ define @intrinsic_vfmv.v.f_f_nxv8f64(double %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfmv.v.f v8, fa0 ; CHECK-NEXT: ret entry: @@ -306,7 +306,7 @@ define @intrinsic_vfmv.v.f_zero_nxv1f16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vfmv.v.f_zero_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: ret entry: @@ -321,7 +321,7 @@ define @intrinsic_vmv.v.i_zero_nxv2f16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: ret entry: @@ -336,7 +336,7 @@ define @intrinsic_vmv.v.i_zero_nxv4f16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: ret entry: @@ -351,7 +351,7 @@ define @intrinsic_vmv.v.i_zero_nxv8f16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: ret entry: @@ -366,7 +366,7 @@ define @intrinsic_vmv.v.i_zero_nxv16f16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: ret entry: @@ -381,7 +381,7 @@ define @intrinsic_vmv.v.i_zero_nxv32f16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: ret entry: @@ -396,7 +396,7 @@ define @intrinsic_vmv.v.i_zero_nxv1f32(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: ret entry: @@ -411,7 +411,7 @@ define @intrinsic_vmv.v.i_zero_nxv2f32(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: ret entry: @@ -426,7 +426,7 @@ define @intrinsic_vmv.v.i_zero_nxv4f32(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: ret entry: @@ -441,7 +441,7 @@ define @intrinsic_vmv.v.i_zero_nxv8f32(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: ret entry: @@ -456,7 +456,7 @@ define @intrinsic_vmv.v.i_zero_nxv16f32(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: ret entry: @@ -471,7 +471,7 @@ define 
@intrinsic_vmv.v.i_zero_nxv1f64(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: ret entry: @@ -486,7 +486,7 @@ define @intrinsic_vmv.v.i_zero_nxv2f64(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: ret entry: @@ -501,7 +501,7 @@ define @intrinsic_vmv.v.i_zero_nxv4f64(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: ret entry: @@ -516,7 +516,7 @@ define @intrinsic_vmv.v.i_zero_nxv8f64(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll @@ -11,7 +11,7 @@ define @intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -55,7 +55,7 @@ define @intrinsic_vfncvt_f.f.w_nxv2f16_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -99,7 +99,7 @@ define @intrinsic_vfncvt_f.f.w_nxv4f16_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv4f16_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -143,7 +143,7 @@ define @intrinsic_vfncvt_f.f.w_nxv8f16_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv8f16_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -187,7 +187,7 @@ define @intrinsic_vfncvt_f.f.w_nxv16f16_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv16f16_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -231,7 +231,7 @@ define @intrinsic_vfncvt_f.f.w_nxv1f32_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -275,7 +275,7 @@ define 
@intrinsic_vfncvt_f.f.w_nxv2f32_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv2f32_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -319,7 +319,7 @@ define @intrinsic_vfncvt_f.f.w_nxv4f32_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv4f32_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -363,7 +363,7 @@ define @intrinsic_vfncvt_f.f.w_nxv8f32_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv8f32_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x.ll @@ -11,7 +11,7 @@ define @intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.f.x.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -55,7 +55,7 @@ define @intrinsic_vfncvt_f.x.w_nxv2f16_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.x.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -99,7 +99,7 @@ define @intrinsic_vfncvt_f.x.w_nxv4f16_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfncvt.f.x.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -143,7 +143,7 @@ define @intrinsic_vfncvt_f.x.w_nxv8f16_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfncvt.f.x.w v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -187,7 +187,7 @@ define @intrinsic_vfncvt_f.x.w_nxv16f16_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfncvt.f.x.w v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -231,7 +231,7 @@ define @intrinsic_vfncvt_f.x.w_nxv1f32_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.x.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -275,7 +275,7 @@ define @intrinsic_vfncvt_f.x.w_nxv2f32_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.f.x.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -319,7 +319,7 @@ define @intrinsic_vfncvt_f.x.w_nxv4f32_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfncvt.f.x.w v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -363,7 +363,7 @@ define @intrinsic_vfncvt_f.x.w_nxv8f32_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfncvt.f.x.w v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu.ll @@ -11,7 +11,7 @@ define @intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.f.xu.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -55,7 +55,7 @@ define @intrinsic_vfncvt_f.xu.w_nxv2f16_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.xu.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -99,7 +99,7 @@ define @intrinsic_vfncvt_f.xu.w_nxv4f16_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfncvt.f.xu.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -143,7 +143,7 @@ define @intrinsic_vfncvt_f.xu.w_nxv8f16_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfncvt.f.xu.w v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -187,7 +187,7 @@ define @intrinsic_vfncvt_f.xu.w_nxv16f16_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfncvt.f.xu.w v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -231,7 +231,7 @@ define @intrinsic_vfncvt_f.xu.w_nxv1f32_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.xu.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -275,7 +275,7 @@ define @intrinsic_vfncvt_f.xu.w_nxv2f32_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.f.xu.w v10, v8 ; 
CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -319,7 +319,7 @@ define @intrinsic_vfncvt_f.xu.w_nxv4f32_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfncvt.f.xu.w v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -363,7 +363,7 @@ define @intrinsic_vfncvt_f.xu.w_nxv8f32_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfncvt.f.xu.w v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f.ll @@ -11,7 +11,7 @@ define @intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.rod.f.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -55,7 +55,7 @@ define @intrinsic_vfncvt_rod.f.f.w_nxv2f16_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.rod.f.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -99,7 +99,7 @@ define @intrinsic_vfncvt_rod.f.f.w_nxv4f16_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv4f16_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfncvt.rod.f.f.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -143,7 +143,7 @@ define @intrinsic_vfncvt_rod.f.f.w_nxv8f16_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv8f16_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfncvt.rod.f.f.w v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -187,7 +187,7 @@ define @intrinsic_vfncvt_rod.f.f.w_nxv16f16_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv16f16_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfncvt.rod.f.f.w v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -231,7 +231,7 @@ define @intrinsic_vfncvt_rod.f.f.w_nxv1f32_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.rod.f.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -275,7 +275,7 @@ define @intrinsic_vfncvt_rod.f.f.w_nxv2f32_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv2f32_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.rod.f.f.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ 
-319,7 +319,7 @@ define @intrinsic_vfncvt_rod.f.f.w_nxv4f32_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv4f32_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfncvt.rod.f.f.w v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -363,7 +363,7 @@ define @intrinsic_vfncvt_rod.f.f.w_nxv8f32_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv8f32_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfncvt.rod.f.f.w v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f.ll @@ -11,7 +11,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -55,7 +55,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -99,7 +99,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -143,7 +143,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -187,7 +187,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -231,7 +231,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -275,7 +275,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv1i16_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -319,7 +319,7 @@ define 
@intrinsic_vfncvt_rtz.x.f.w_nxv2i16_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -363,7 +363,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv4i16_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -407,7 +407,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv8i16_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -451,7 +451,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv16i16_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -495,7 +495,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv1i32_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -539,7 +539,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv2i32_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -583,7 +583,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv4i32_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -627,7 +627,7 @@ define @intrinsic_vfncvt_rtz.x.f.w_nxv8i32_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f.ll @@ -11,7 +11,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -55,7 +55,7 @@ define 
@intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -99,7 +99,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -143,7 +143,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -187,7 +187,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -231,7 +231,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -275,7 +275,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i16_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -319,7 +319,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i16_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -363,7 +363,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i16_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -407,7 +407,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i16_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -451,7 +451,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv16i16_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: 
vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -495,7 +495,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i32_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -539,7 +539,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i32_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -583,7 +583,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i32_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -627,7 +627,7 @@ define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i32_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f.ll @@ -11,7 +11,7 @@ define @intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vfncvt.x.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -55,7 +55,7 @@ define @intrinsic_vfncvt_x.f.w_nxv2i8_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vfncvt.x.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -99,7 +99,7 @@ define @intrinsic_vfncvt_x.f.w_nxv4i8_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vfncvt.x.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -143,7 +143,7 @@ define @intrinsic_vfncvt_x.f.w_nxv8i8_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vfncvt.x.f.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -187,7 +187,7 @@ define @intrinsic_vfncvt_x.f.w_nxv16i8_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vfncvt.x.f.w v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: 
ret @@ -231,7 +231,7 @@ define @intrinsic_vfncvt_x.f.w_nxv32i8_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vfncvt.x.f.w v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -275,7 +275,7 @@ define @intrinsic_vfncvt_x.f.w_nxv1i16_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.x.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -319,7 +319,7 @@ define @intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.x.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -363,7 +363,7 @@ define @intrinsic_vfncvt_x.f.w_nxv4i16_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfncvt.x.f.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -407,7 +407,7 @@ define @intrinsic_vfncvt_x.f.w_nxv8i16_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfncvt.x.f.w v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -451,7 +451,7 @@ define @intrinsic_vfncvt_x.f.w_nxv16i16_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfncvt.x.f.w v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -495,7 +495,7 @@ define @intrinsic_vfncvt_x.f.w_nxv1i32_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.x.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -539,7 +539,7 @@ define @intrinsic_vfncvt_x.f.w_nxv2i32_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.x.f.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -583,7 +583,7 @@ define @intrinsic_vfncvt_x.f.w_nxv4i32_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfncvt.x.f.w v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -627,7 +627,7 @@ define @intrinsic_vfncvt_x.f.w_nxv8i32_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfncvt.x.f.w v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; 
CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f.ll @@ -11,7 +11,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vfncvt.xu.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -55,7 +55,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vfncvt.xu.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -99,7 +99,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vfncvt.xu.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -143,7 +143,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vfncvt.xu.f.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -187,7 +187,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vfncvt.xu.f.w v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -231,7 +231,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vfncvt.xu.f.w v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -275,7 +275,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv1i16_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i16_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.xu.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -319,7 +319,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv2i16_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.xu.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -363,7 +363,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv4i16_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i16_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfncvt.xu.f.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -407,7 +407,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv8i16_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: 
intrinsic_vfncvt_xu.f.w_nxv8i16_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfncvt.xu.f.w v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -451,7 +451,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv16i16_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv16i16_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfncvt.xu.f.w v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -495,7 +495,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv1i32_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.xu.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -539,7 +539,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv2i32_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i32_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.xu.f.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -583,7 +583,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv4i32_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i32_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfncvt.xu.f.w v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -627,7 +627,7 @@ define @intrinsic_vfncvt_xu.f.w_nxv8i32_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i32_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfncvt.xu.f.w v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll @@ -7,7 +7,7 @@ define @vfneg_vv_nxv1f16( %va) { ; CHECK-LABEL: vfneg_vv_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %vb = fneg %va @@ -17,7 +17,7 @@ define @vfneg_vv_nxv2f16( %va) { ; CHECK-LABEL: vfneg_vv_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %vb = fneg %va @@ -27,7 +27,7 @@ define @vfneg_vv_nxv4f16( %va) { ; CHECK-LABEL: vfneg_vv_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %vb = fneg %va @@ -37,7 +37,7 @@ define @vfneg_vv_nxv8f16( %va) { ; CHECK-LABEL: vfneg_vv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %vb = fneg %va @@ -47,7 +47,7 @@ define @vfneg_vv_nxv16f16( %va) { ; CHECK-LABEL: vfneg_vv_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %vb 
= fneg %va @@ -57,7 +57,7 @@ define @vfneg_vv_nxv32f16( %va) { ; CHECK-LABEL: vfneg_vv_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %vb = fneg %va @@ -67,7 +67,7 @@ define @vfneg_vv_nxv1f32( %va) { ; CHECK-LABEL: vfneg_vv_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %vb = fneg %va @@ -77,7 +77,7 @@ define @vfneg_vv_nxv2f32( %va) { ; CHECK-LABEL: vfneg_vv_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %vb = fneg %va @@ -87,7 +87,7 @@ define @vfneg_vv_nxv4f32( %va) { ; CHECK-LABEL: vfneg_vv_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %vb = fneg %va @@ -97,7 +97,7 @@ define @vfneg_vv_nxv8f32( %va) { ; CHECK-LABEL: vfneg_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %vb = fneg %va @@ -107,7 +107,7 @@ define @vfneg_vv_nxv16f32( %va) { ; CHECK-LABEL: vfneg_vv_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %vb = fneg %va @@ -117,7 +117,7 @@ define @vfneg_vv_nxv1f64( %va) { ; CHECK-LABEL: vfneg_vv_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %vb = fneg %va @@ -127,7 +127,7 @@ define @vfneg_vv_nxv2f64( %va) { ; CHECK-LABEL: vfneg_vv_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %vb = fneg %va @@ -137,7 +137,7 @@ define @vfneg_vv_nxv4f64( %va) { ; CHECK-LABEL: vfneg_vv_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %vb = fneg %va @@ -147,7 +147,7 @@ define @vfneg_vv_nxv8f64( %va) { ; CHECK-LABEL: vfneg_vv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %vb = fneg %va diff --git a/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll @@ -19,7 +19,7 @@ define @vfneg_vv_nxv1f16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -43,7 +43,7 @@ define @vfneg_vv_nxv2f16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -67,7 +67,7 @@ define @vfneg_vv_nxv4f16_unmasked( %va, 
i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -91,7 +91,7 @@ define @vfneg_vv_nxv8f16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -115,7 +115,7 @@ define @vfneg_vv_nxv16f16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -139,7 +139,7 @@ define @vfneg_vv_nxv32f16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_nxv32f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -163,7 +163,7 @@ define @vfneg_vv_nxv1f32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -187,7 +187,7 @@ define @vfneg_vv_nxv2f32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -211,7 +211,7 @@ define @vfneg_vv_nxv4f32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -235,7 +235,7 @@ define @vfneg_vv_nxv8f32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -259,7 +259,7 @@ define @vfneg_vv_nxv16f32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_nxv16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -283,7 +283,7 @@ define @vfneg_vv_nxv1f64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -307,7 +307,7 @@ define @vfneg_vv_nxv2f64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement 
poison, i1 true, i32 0 @@ -331,7 +331,7 @@ define @vfneg_vv_nxv4f64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -355,7 +355,7 @@ define @vfneg_vv_nxv7f64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_nxv7f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -379,7 +379,7 @@ define @vfneg_vv_nxv8f64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfneg_vv_nxv8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -398,7 +398,7 @@ ; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a4, a1, 3 -; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma ; CHECK-NEXT: sub a3, a0, a1 ; CHECK-NEXT: vslidedown.vx v0, v0, a4 ; CHECK-NEXT: bltu a0, a3, .LBB32_2 @@ -429,14 +429,14 @@ ; CHECK-NEXT: mv a2, a1 ; CHECK-NEXT: .LBB33_2: ; CHECK-NEXT: li a3, 0 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: sub a1, a0, a1 ; CHECK-NEXT: vfneg.v v8, v8 ; CHECK-NEXT: bltu a0, a1, .LBB33_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: mv a3, a1 ; CHECK-NEXT: .LBB33_4: -; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; CHECK-NEXT: vfneg.v v16, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-vp.ll @@ -28,7 +28,7 @@ define @vfnmacc_vv_nxv1f16_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfnmacc.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -80,7 +80,7 @@ define @vfnmacc_vf_nxv1f16_unmasked( %a, half %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vf_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfnmacc.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -171,7 +171,7 @@ define @vfnmacc_vv_nxv2f16_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfnmacc.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -223,7 +223,7 @@ define @vfnmacc_vf_nxv2f16_unmasked( %a, half %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vf_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfnmacc.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -314,7 +314,7 @@ define @vfnmacc_vv_nxv4f16_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv4f16_unmasked: ; 
CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfnmacc.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -366,7 +366,7 @@ define @vfnmacc_vf_nxv4f16_unmasked( %a, half %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vf_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfnmacc.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -457,7 +457,7 @@ define @vfnmacc_vv_nxv8f16_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfnmacc.vv v12, v8, v10 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -509,7 +509,7 @@ define @vfnmacc_vf_nxv8f16_unmasked( %a, half %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vf_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfnmacc.vf v10, fa0, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -600,7 +600,7 @@ define @vfnmacc_vv_nxv16f16_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfnmacc.vv v16, v8, v12 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -652,7 +652,7 @@ define @vfnmacc_vf_nxv16f16_unmasked( %a, half %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vf_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfnmacc.vf v12, fa0, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -745,7 +745,7 @@ ; CHECK-LABEL: vfnmacc_vv_nxv32f16_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, ma ; CHECK-NEXT: vfnmacc.vv v24, v8, v16 ; CHECK-NEXT: vmv8r.v v8, v24 ; CHECK-NEXT: ret @@ -797,7 +797,7 @@ define @vfnmacc_vf_nxv32f16_unmasked( %a, half %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vf_nxv32f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vfnmacc.vf v16, fa0, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -889,7 +889,7 @@ define @vfnmacc_vv_nxv1f32_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfnmacc.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -941,7 +941,7 @@ define @vfnmacc_vf_nxv1f32_unmasked( %a, float %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vf_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfnmacc.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1032,7 +1032,7 @@ define @vfnmacc_vv_nxv2f32_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfnmacc.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; 
CHECK-NEXT: ret @@ -1084,7 +1084,7 @@ define @vfnmacc_vf_nxv2f32_unmasked( %a, float %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vf_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfnmacc.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1175,7 +1175,7 @@ define @vfnmacc_vv_nxv4f32_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfnmacc.vv v12, v8, v10 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -1227,7 +1227,7 @@ define @vfnmacc_vf_nxv4f32_unmasked( %a, float %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vf_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfnmacc.vf v10, fa0, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1318,7 +1318,7 @@ define @vfnmacc_vv_nxv8f32_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfnmacc.vv v16, v8, v12 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -1370,7 +1370,7 @@ define @vfnmacc_vf_nxv8f32_unmasked( %a, float %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vf_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfnmacc.vf v12, fa0, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1463,7 +1463,7 @@ ; CHECK-LABEL: vfnmacc_vv_nxv16f32_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, ma ; CHECK-NEXT: vfnmacc.vv v24, v8, v16 ; CHECK-NEXT: vmv8r.v v8, v24 ; CHECK-NEXT: ret @@ -1515,7 +1515,7 @@ define @vfnmacc_vf_nxv16f32_unmasked( %a, float %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vf_nxv16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vfnmacc.vf v16, fa0, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1607,7 +1607,7 @@ define @vfnmacc_vv_nxv1f64_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfnmacc.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1659,7 +1659,7 @@ define @vfnmacc_vf_nxv1f64_unmasked( %a, double %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vf_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfnmacc.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1750,7 +1750,7 @@ define @vfnmacc_vv_nxv2f64_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfnmacc.vv v12, v8, v10 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -1802,7 +1802,7 @@ define @vfnmacc_vf_nxv2f64_unmasked( %a, double %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vf_nxv2f64_unmasked: ; CHECK: # 
%bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfnmacc.vf v10, fa0, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1893,7 +1893,7 @@ define @vfnmacc_vv_nxv4f64_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vv_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfnmacc.vv v16, v8, v12 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -1945,7 +1945,7 @@ define @vfnmacc_vf_nxv4f64_unmasked( %a, double %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vf_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfnmacc.vf v12, fa0, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -2038,7 +2038,7 @@ ; CHECK-LABEL: vfnmacc_vv_nxv8f64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, ma ; CHECK-NEXT: vfnmacc.vv v24, v8, v16 ; CHECK-NEXT: vmv8r.v v8, v24 ; CHECK-NEXT: ret @@ -2090,7 +2090,7 @@ define @vfnmacc_vf_nxv8f64_unmasked( %a, double %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmacc_vf_nxv8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vfnmacc.vf v16, fa0, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmacc.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmacc.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfnmacc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmacc.ll @@ -13,7 +13,7 @@ define @intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfnmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -60,7 +60,7 @@ define @intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfnmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -107,7 +107,7 @@ define @intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfnmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -154,7 +154,7 @@ define @intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfnmacc.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -201,7 +201,7 @@ define @intrinsic_vfnmacc_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfnmacc.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -248,7 +248,7 @@ define 
@intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfnmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -295,7 +295,7 @@ define @intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfnmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -342,7 +342,7 @@ define @intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfnmacc.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -389,7 +389,7 @@ define @intrinsic_vfnmacc_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfnmacc.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -436,7 +436,7 @@ define @intrinsic_vfnmacc_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfnmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -483,7 +483,7 @@ define @intrinsic_vfnmacc_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfnmacc.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -530,7 +530,7 @@ define @intrinsic_vfnmacc_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfnmacc.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -577,7 +577,7 @@ define @intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -624,7 +624,7 @@ define @intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -671,7 +671,7 @@ define @intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -718,7 +718,7 @@ define 
@intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfnmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: @@ -765,7 +765,7 @@ define @intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfnmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: @@ -812,7 +812,7 @@ define @intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfnmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfnmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vfnmacc_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -1047,7 +1047,7 @@ define @intrinsic_vfnmacc_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfnmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: @@ -1094,7 +1094,7 @@ define @intrinsic_vfnmacc_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfnmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-sdnode.ll @@ -12,7 +12,7 @@ define @vfnmsub_vv_nxv1f16( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; 
CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %neg = fneg %va @@ -24,7 +24,7 @@ define @vfnmsub_vf_nxv1f16( %va, %vb, half %c) { ; CHECK-LABEL: vfnmsub_vf_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -40,7 +40,7 @@ define @vfnmsub_vv_nxv2f16( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v10, v9 ; CHECK-NEXT: ret %neg = fneg %va @@ -52,7 +52,7 @@ define @vfnmsub_vf_nxv2f16( %va, %vb, half %c) { ; CHECK-LABEL: vfnmsub_vf_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -68,7 +68,7 @@ define @vfnmsub_vv_nxv4f16( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %neg = fneg %vb @@ -80,7 +80,7 @@ define @vfnmsub_vf_nxv4f16( %va, %vb, half %c) { ; CHECK-LABEL: vfnmsub_vf_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -96,7 +96,7 @@ define @vfnmsub_vv_nxv8f16( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfnmacc.vv v8, v12, v10 ; CHECK-NEXT: ret %neg = fneg %vb @@ -108,7 +108,7 @@ define @vfnmsub_vf_nxv8f16( %va, %vb, half %c) { ; CHECK-LABEL: vfnmsub_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfnmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -124,7 +124,7 @@ define @vfnmsub_vv_nxv16f16( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v16, v12 ; CHECK-NEXT: ret %neg = fneg %vc @@ -136,7 +136,7 @@ define @vfnmsub_vf_nxv16f16( %va, %vb, half %c) { ; CHECK-LABEL: vfnmsub_vf_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -153,7 +153,7 @@ ; CHECK-LABEL: vfnmsub_vv_nxv32f16: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v24, v16 ; CHECK-NEXT: ret %neg = fneg %vc @@ -165,7 +165,7 @@ define @vfnmsub_vf_nxv32f16( %va, %vb, half %c) { ; CHECK-LABEL: vfnmsub_vf_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfnmacc.vf v8, fa0, v16 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -181,7 +181,7 @@ define @vfnmsub_vv_nxv1f32( %va, %vb, %vc) { ; CHECK-LABEL: 
vfnmsub_vv_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %neg = fneg %vb @@ -193,7 +193,7 @@ define @vfnmsub_vf_nxv1f32( %va, %vb, float %c) { ; CHECK-LABEL: vfnmsub_vf_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -209,7 +209,7 @@ define @vfnmsub_vv_nxv2f32( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v10, v9 ; CHECK-NEXT: ret %neg = fneg %vc @@ -221,7 +221,7 @@ define @vfnmsub_vf_nxv2f32( %va, %vb, float %c) { ; CHECK-LABEL: vfnmsub_vf_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -237,7 +237,7 @@ define @vfnmsub_vv_nxv4f32( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v10, v12 ; CHECK-NEXT: ret %neg = fneg %va @@ -249,7 +249,7 @@ define @vfnmsub_vf_nxv4f32( %va, %vb, float %c) { ; CHECK-LABEL: vfnmsub_vf_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -265,7 +265,7 @@ define @vfnmsub_vv_nxv8f32( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfnmacc.vv v8, v16, v12 ; CHECK-NEXT: ret %neg = fneg %vc @@ -277,7 +277,7 @@ define @vfnmsub_vf_nxv8f32( %va, %vb, float %c) { ; CHECK-LABEL: vfnmsub_vf_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfnmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -294,7 +294,7 @@ ; CHECK-LABEL: vfnmsub_vv_nxv16f32: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v24, v16 ; CHECK-NEXT: ret %neg = fneg %va @@ -306,7 +306,7 @@ define @vfnmsub_vf_nxv16f32( %va, %vb, float %c) { ; CHECK-LABEL: vfnmsub_vf_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -322,7 +322,7 @@ define @vfnmsub_vv_nxv1f64( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfnmacc.vv v8, v10, v9 ; CHECK-NEXT: ret %neg = fneg %vb @@ -334,7 +334,7 @@ define @vfnmsub_vf_nxv1f64( %va, %vb, double %c) { ; CHECK-LABEL: vfnmsub_vf_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement 
poison, double %c, i32 0 @@ -350,7 +350,7 @@ define @vfnmsub_vv_nxv2f64( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v12, v10 ; CHECK-NEXT: ret %neg = fneg %va @@ -362,7 +362,7 @@ define @vfnmsub_vf_nxv2f64( %va, %vb, double %c) { ; CHECK-LABEL: vfnmsub_vf_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret %head = insertelement poison, double %c, i32 0 @@ -378,7 +378,7 @@ define @vfnmsub_vv_nxv4f64( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfnmadd.vv v8, v12, v16 ; CHECK-NEXT: ret %neg = fneg %vb @@ -390,7 +390,7 @@ define @vfnmsub_vf_nxv4f64( %va, %vb, double %c) { ; CHECK-LABEL: vfnmsub_vf_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret %head = insertelement poison, double %c, i32 0 @@ -407,7 +407,7 @@ ; CHECK-LABEL: vfnmsub_vv_nxv8f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfnmacc.vv v8, v16, v24 ; CHECK-NEXT: ret %neg = fneg %vb @@ -419,7 +419,7 @@ define @vfnmsub_vf_nxv8f64( %va, %vb, double %c) { ; CHECK-LABEL: vfnmsub_vf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfnmacc.vf v8, fa0, v16 ; CHECK-NEXT: ret %head = insertelement poison, double %c, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd.ll @@ -13,7 +13,7 @@ define @intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -60,7 +60,7 @@ define @intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -107,7 +107,7 @@ define @intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -154,7 +154,7 @@ define @intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfnmadd.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -201,7 +201,7 @@ define @intrinsic_vfnmadd_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen 
%3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfnmadd.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -248,7 +248,7 @@ define @intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -295,7 +295,7 @@ define @intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -342,7 +342,7 @@ define @intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfnmadd.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -389,7 +389,7 @@ define @intrinsic_vfnmadd_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfnmadd.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -436,7 +436,7 @@ define @intrinsic_vfnmadd_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfnmadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -483,7 +483,7 @@ define @intrinsic_vfnmadd_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfnmadd.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -530,7 +530,7 @@ define @intrinsic_vfnmadd_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfnmadd.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -577,7 +577,7 @@ define @intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -624,7 +624,7 @@ define @intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -671,7 +671,7 @@ define @intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, 
iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -718,7 +718,7 @@ define @intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: @@ -765,7 +765,7 @@ define @intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: @@ -812,7 +812,7 @@ define @intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vfnmadd_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -1047,7 +1047,7 @@ define @intrinsic_vfnmadd_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: @@ -1094,7 +1094,7 @@ define @intrinsic_vfnmadd_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-vp.ll 
b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-vp.ll @@ -27,7 +27,7 @@ define @vfnmsac_vv_nxv1f16_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfnmsac.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -76,7 +76,7 @@ define @vfnmsac_vf_nxv1f16_unmasked( %a, half %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vf_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfnmsac.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -162,7 +162,7 @@ define @vfnmsac_vv_nxv2f16_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfnmsac.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -211,7 +211,7 @@ define @vfnmsac_vf_nxv2f16_unmasked( %a, half %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vf_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfnmsac.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -297,7 +297,7 @@ define @vfnmsac_vv_nxv4f16_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfnmsac.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -346,7 +346,7 @@ define @vfnmsac_vf_nxv4f16_unmasked( %a, half %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vf_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfnmsac.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -432,7 +432,7 @@ define @vfnmsac_vv_nxv8f16_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfnmsac.vv v12, v8, v10 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -481,7 +481,7 @@ define @vfnmsac_vf_nxv8f16_unmasked( %a, half %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vf_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfnmsac.vf v10, fa0, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -567,7 +567,7 @@ define @vfnmsac_vv_nxv16f16_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfnmsac.vv v16, v8, v12 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -616,7 +616,7 @@ define @vfnmsac_vf_nxv16f16_unmasked( %a, half %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vf_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfnmsac.vf v12, fa0, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -704,7 +704,7 @@ 
; CHECK-LABEL: vfnmsac_vv_nxv32f16_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, ma ; CHECK-NEXT: vfnmsac.vv v24, v8, v16 ; CHECK-NEXT: vmv8r.v v8, v24 ; CHECK-NEXT: ret @@ -753,7 +753,7 @@ define @vfnmsac_vf_nxv32f16_unmasked( %a, half %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vf_nxv32f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vfnmsac.vf v16, fa0, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -840,7 +840,7 @@ define @vfnmsac_vv_nxv1f32_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfnmsac.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -889,7 +889,7 @@ define @vfnmsac_vf_nxv1f32_unmasked( %a, float %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vf_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfnmsac.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -975,7 +975,7 @@ define @vfnmsac_vv_nxv2f32_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfnmsac.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1024,7 +1024,7 @@ define @vfnmsac_vf_nxv2f32_unmasked( %a, float %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vf_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfnmsac.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1110,7 +1110,7 @@ define @vfnmsac_vv_nxv4f32_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfnmsac.vv v12, v8, v10 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -1159,7 +1159,7 @@ define @vfnmsac_vf_nxv4f32_unmasked( %a, float %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vf_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfnmsac.vf v10, fa0, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1245,7 +1245,7 @@ define @vfnmsac_vv_nxv8f32_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfnmsac.vv v16, v8, v12 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -1294,7 +1294,7 @@ define @vfnmsac_vf_nxv8f32_unmasked( %a, float %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vf_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfnmsac.vf v12, fa0, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1382,7 +1382,7 @@ ; CHECK-LABEL: vfnmsac_vv_nxv16f32_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, 
e32, m8, tu, ma ; CHECK-NEXT: vfnmsac.vv v24, v8, v16 ; CHECK-NEXT: vmv8r.v v8, v24 ; CHECK-NEXT: ret @@ -1431,7 +1431,7 @@ define @vfnmsac_vf_nxv16f32_unmasked( %a, float %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vf_nxv16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vfnmsac.vf v16, fa0, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1518,7 +1518,7 @@ define @vfnmsac_vv_nxv1f64_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfnmsac.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1567,7 +1567,7 @@ define @vfnmsac_vf_nxv1f64_unmasked( %a, double %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vf_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfnmsac.vf v9, fa0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1653,7 +1653,7 @@ define @vfnmsac_vv_nxv2f64_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfnmsac.vv v12, v8, v10 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -1702,7 +1702,7 @@ define @vfnmsac_vf_nxv2f64_unmasked( %a, double %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vf_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfnmsac.vf v10, fa0, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1788,7 +1788,7 @@ define @vfnmsac_vv_nxv4f64_unmasked( %a, %b, %c, %m, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vv_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfnmsac.vv v16, v8, v12 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -1837,7 +1837,7 @@ define @vfnmsac_vf_nxv4f64_unmasked( %a, double %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vf_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfnmsac.vf v12, fa0, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1925,7 +1925,7 @@ ; CHECK-LABEL: vfnmsac_vv_nxv8f64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, ma ; CHECK-NEXT: vfnmsac.vv v24, v8, v16 ; CHECK-NEXT: vmv8r.v v8, v24 ; CHECK-NEXT: ret @@ -1974,7 +1974,7 @@ define @vfnmsac_vf_nxv8f64_unmasked( %a, double %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: vfnmsac_vf_nxv8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vfnmsac.vf v16, fa0, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsac.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsac.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfnmsac.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsac.ll @@ -13,7 +13,7 @@ define @intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, 
e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfnmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -60,7 +60,7 @@ define @intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfnmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -107,7 +107,7 @@ define @intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfnmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -154,7 +154,7 @@ define @intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfnmsac.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -201,7 +201,7 @@ define @intrinsic_vfnmsac_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfnmsac.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -248,7 +248,7 @@ define @intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfnmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -295,7 +295,7 @@ define @intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfnmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -342,7 +342,7 @@ define @intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfnmsac.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -389,7 +389,7 @@ define @intrinsic_vfnmsac_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfnmsac.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -436,7 +436,7 @@ define @intrinsic_vfnmsac_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfnmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -483,7 +483,7 @@ define @intrinsic_vfnmsac_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfnmsac.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -530,7 +530,7 @@ define @intrinsic_vfnmsac_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfnmsac.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -577,7 +577,7 @@ define @intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -624,7 +624,7 @@ define @intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -671,7 +671,7 @@ define @intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -718,7 +718,7 @@ define @intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfnmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: @@ -765,7 +765,7 @@ define @intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfnmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: @@ -812,7 +812,7 @@ define @intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfnmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, 
e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfnmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vfnmsac_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -1047,7 +1047,7 @@ define @intrinsic_vfnmsac_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfnmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: @@ -1094,7 +1094,7 @@ define @intrinsic_vfnmsac_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfnmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-sdnode.ll @@ -12,7 +12,7 @@ define @vfnmsub_vv_nxv1f16( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfnmsub.vv v8, v9, v10 ; CHECK-NEXT: ret %neg = fneg %va @@ -23,7 +23,7 @@ define @vfnmsub_vf_nxv1f16( %va, %vb, half %c) { ; CHECK-LABEL: vfnmsub_vf_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -38,7 +38,7 @@ define @vfnmsub_vv_nxv2f16( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfnmsub.vv v8, v10, v9 ; CHECK-NEXT: ret %neg = fneg %va @@ -49,7 +49,7 @@ define @vfnmsub_vf_nxv2f16( %va, %vb, half %c) { ; CHECK-LABEL: vfnmsub_vf_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -64,7 +64,7 @@ define @vfnmsub_vv_nxv4f16( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfnmsub.vv v8, v9, v10 ; CHECK-NEXT: ret %neg = fneg %vb @@ -75,7 +75,7 @@ define @vfnmsub_vf_nxv4f16( %va, %vb, half %c) { ; CHECK-LABEL: vfnmsub_vf_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -90,7 +90,7 @@ define @vfnmsub_vv_nxv8f16( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfnmsac.vv v8, v12, v10 ; CHECK-NEXT: ret %neg = fneg %vb @@ -101,7 +101,7 @@ define 
@vfnmsub_vf_nxv8f16( %va, %vb, half %c) { ; CHECK-LABEL: vfnmsub_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfnmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -116,7 +116,7 @@ define @vfnmsub_vv_nxv16f16( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfnmsub.vv v8, v16, v12 ; CHECK-NEXT: ret %neg = fneg %vc @@ -127,7 +127,7 @@ define @vfnmsub_vf_nxv16f16( %va, %vb, half %c) { ; CHECK-LABEL: vfnmsub_vf_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -143,7 +143,7 @@ ; CHECK-LABEL: vfnmsub_vv_nxv32f16: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfnmsub.vv v8, v24, v16 ; CHECK-NEXT: ret %neg = fneg %vc @@ -154,7 +154,7 @@ define @vfnmsub_vf_nxv32f16( %va, %vb, half %c) { ; CHECK-LABEL: vfnmsub_vf_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfnmsac.vf v8, fa0, v16 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -169,7 +169,7 @@ define @vfnmsub_vv_nxv1f32( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfnmsub.vv v8, v9, v10 ; CHECK-NEXT: ret %neg = fneg %vb @@ -180,7 +180,7 @@ define @vfnmsub_vf_nxv1f32( %va, %vb, float %c) { ; CHECK-LABEL: vfnmsub_vf_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -195,7 +195,7 @@ define @vfnmsub_vv_nxv2f32( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfnmsub.vv v8, v10, v9 ; CHECK-NEXT: ret %neg = fneg %vc @@ -206,7 +206,7 @@ define @vfnmsub_vf_nxv2f32( %va, %vb, float %c) { ; CHECK-LABEL: vfnmsub_vf_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -221,7 +221,7 @@ define @vfnmsub_vv_nxv4f32( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfnmsub.vv v8, v10, v12 ; CHECK-NEXT: ret %neg = fneg %va @@ -232,7 +232,7 @@ define @vfnmsub_vf_nxv4f32( %va, %vb, float %c) { ; CHECK-LABEL: vfnmsub_vf_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -247,7 +247,7 @@ define @vfnmsub_vv_nxv8f32( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, 
ma ; CHECK-NEXT: vfnmsac.vv v8, v16, v12 ; CHECK-NEXT: ret %neg = fneg %vc @@ -258,7 +258,7 @@ define @vfnmsub_vf_nxv8f32( %va, %vb, float %c) { ; CHECK-LABEL: vfnmsub_vf_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfnmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -274,7 +274,7 @@ ; CHECK-LABEL: vfnmsub_vv_nxv16f32: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfnmsub.vv v8, v24, v16 ; CHECK-NEXT: ret %neg = fneg %va @@ -285,7 +285,7 @@ define @vfnmsub_vf_nxv16f32( %va, %vb, float %c) { ; CHECK-LABEL: vfnmsub_vf_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -300,7 +300,7 @@ define @vfnmsub_vv_nxv1f64( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfnmsac.vv v8, v10, v9 ; CHECK-NEXT: ret %neg = fneg %vb @@ -311,7 +311,7 @@ define @vfnmsub_vf_nxv1f64( %va, %vb, double %c) { ; CHECK-LABEL: vfnmsub_vf_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, double %c, i32 0 @@ -326,7 +326,7 @@ define @vfnmsub_vv_nxv2f64( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfnmsub.vv v8, v12, v10 ; CHECK-NEXT: ret %neg = fneg %va @@ -337,7 +337,7 @@ define @vfnmsub_vf_nxv2f64( %va, %vb, double %c) { ; CHECK-LABEL: vfnmsub_vf_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret %head = insertelement poison, double %c, i32 0 @@ -352,7 +352,7 @@ define @vfnmsub_vv_nxv4f64( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfnmsub.vv v8, v12, v16 ; CHECK-NEXT: ret %neg = fneg %vb @@ -363,7 +363,7 @@ define @vfnmsub_vf_nxv4f64( %va, %vb, double %c) { ; CHECK-LABEL: vfnmsub_vf_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret %head = insertelement poison, double %c, i32 0 @@ -379,7 +379,7 @@ ; CHECK-LABEL: vfnmsub_vv_nxv8f64: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfnmsac.vv v8, v16, v24 ; CHECK-NEXT: ret %neg = fneg %vb @@ -390,7 +390,7 @@ define @vfnmsub_vf_nxv8f64( %va, %vb, double %c) { ; CHECK-LABEL: vfnmsub_vf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfnmsac.vf v8, fa0, v16 ; CHECK-NEXT: ret %head = insertelement poison, double %c, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/vfnmsub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub.ll @@ -13,7 +13,7 @@ define @intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfnmsub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -60,7 +60,7 @@ define @intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfnmsub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -107,7 +107,7 @@ define @intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfnmsub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -154,7 +154,7 @@ define @intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfnmsub.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -201,7 +201,7 @@ define @intrinsic_vfnmsub_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfnmsub.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -248,7 +248,7 @@ define @intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfnmsub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -295,7 +295,7 @@ define @intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfnmsub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -342,7 +342,7 @@ define @intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfnmsub.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -389,7 +389,7 @@ define @intrinsic_vfnmsub_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfnmsub.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -436,7 +436,7 @@ define @intrinsic_vfnmsub_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: 
vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfnmsub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -483,7 +483,7 @@ define @intrinsic_vfnmsub_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfnmsub.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -530,7 +530,7 @@ define @intrinsic_vfnmsub_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfnmsub.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -577,7 +577,7 @@ define @intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -624,7 +624,7 @@ define @intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -671,7 +671,7 @@ define @intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -718,7 +718,7 @@ define @intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: @@ -765,7 +765,7 @@ define @intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: @@ -812,7 +812,7 @@ define @intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli 
zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -1047,7 +1047,7 @@ define @intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: @@ -1094,7 +1094,7 @@ define @intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64( %0, double %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfpext-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfpext-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfpext-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfpext-sdnode.ll @@ -8,7 +8,7 @@ ; ; CHECK-LABEL: vfpext_nxv1f16_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -20,9 +20,9 @@ ; ; CHECK-LABEL: vfpext_nxv1f16_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v8, v9 ; CHECK-NEXT: ret %evec = fpext %va to @@ -33,7 +33,7 @@ ; ; CHECK-LABEL: vfpext_nxv2f16_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -45,9 +45,9 @@ ; ; CHECK-LABEL: vfpext_nxv2f16_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v8, v10 ; CHECK-NEXT: ret %evec = fpext %va to @@ -58,7 +58,7 @@ ; ; CHECK-LABEL: vfpext_nxv4f16_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -70,9 +70,9 @@ ; ; CHECK-LABEL: vfpext_nxv4f16_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli 
zero, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v8, v12 ; CHECK-NEXT: ret %evec = fpext %va to @@ -83,7 +83,7 @@ ; ; CHECK-LABEL: vfpext_nxv8f16_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -95,9 +95,9 @@ ; ; CHECK-LABEL: vfpext_nxv8f16_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v16, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v8, v16 ; CHECK-NEXT: ret %evec = fpext %va to @@ -108,7 +108,7 @@ ; ; CHECK-LABEL: vfpext_nxv16f16_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -120,7 +120,7 @@ ; ; CHECK-LABEL: vfpext_nxv1f32_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -132,7 +132,7 @@ ; ; CHECK-LABEL: vfpext_nxv2f32_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -144,7 +144,7 @@ ; ; CHECK-LABEL: vfpext_nxv4f32_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -156,7 +156,7 @@ ; ; CHECK-LABEL: vfpext_nxv8f32_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll @@ -18,7 +18,7 @@ define @vfpext_nxv2f16_nxv2f32_unmasked( %a, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_nxv2f16_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -43,9 +43,9 @@ define @vfpext_nxv2f16_nxv2f64_unmasked( %a, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_nxv2f16_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v8, v10 ; CHECK-NEXT: ret %v = call @llvm.vp.fpext.nxv2f64.nxv2f16( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) @@ -68,7 +68,7 @@ define @vfpext_nxv2f32_nxv2f64_unmasked( %a, i32 zeroext %vl) { ; CHECK-LABEL: vfpext_nxv2f32_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -98,7 +98,7 @@ ; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: 
srli a4, a1, 2 -; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma ; CHECK-NEXT: slli a1, a1, 1 ; CHECK-NEXT: sub a3, a0, a1 ; CHECK-NEXT: vslidedown.vx v0, v0, a4 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll @@ -7,7 +7,7 @@ define @vfptosi_nxv1f16_nxv1i1( %va) { ; CHECK-LABEL: vfptosi_nxv1f16_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -19,7 +19,7 @@ define @vfptosi_nxv1f16_nxv1i7( %va) { ; CHECK-LABEL: vfptosi_nxv1f16_nxv1i7: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -30,7 +30,7 @@ define @vfptoui_nxv1f16_nxv1i7( %va) { ; CHECK-LABEL: vfptoui_nxv1f16_nxv1i7: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -41,7 +41,7 @@ define @vfptoui_nxv1f16_nxv1i1( %va) { ; CHECK-LABEL: vfptoui_nxv1f16_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -53,7 +53,7 @@ define @vfptosi_nxv1f16_nxv1i8( %va) { ; CHECK-LABEL: vfptosi_nxv1f16_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -64,7 +64,7 @@ define @vfptoui_nxv1f16_nxv1i8( %va) { ; CHECK-LABEL: vfptoui_nxv1f16_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -75,7 +75,7 @@ define @vfptosi_nxv1f16_nxv1i16( %va) { ; CHECK-LABEL: vfptosi_nxv1f16_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: ret %evec = fptosi %va to @@ -85,7 +85,7 @@ define @vfptoui_nxv1f16_nxv1i16( %va) { ; CHECK-LABEL: vfptoui_nxv1f16_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: ret %evec = fptoui %va to @@ -95,7 +95,7 @@ define @vfptosi_nxv1f16_nxv1i32( %va) { ; CHECK-LABEL: vfptosi_nxv1f16_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfwcvt.rtz.x.f.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -106,7 +106,7 @@ define @vfptoui_nxv1f16_nxv1i32( %va) { ; CHECK-LABEL: vfptoui_nxv1f16_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -117,9 +117,9 @@ define @vfptosi_nxv1f16_nxv1i64( %va) { ; CHECK-LABEL: vfptosi_nxv1f16_nxv1i64: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v9 ; CHECK-NEXT: ret %evec = fptosi %va to @@ -129,9 +129,9 @@ define @vfptoui_nxv1f16_nxv1i64( %va) { ; CHECK-LABEL: vfptoui_nxv1f16_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v9 ; CHECK-NEXT: ret %evec = fptoui %va to @@ -141,7 +141,7 @@ define @vfptosi_nxv2f16_nxv2i1( %va) { ; CHECK-LABEL: vfptosi_nxv2f16_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -153,7 +153,7 @@ define @vfptoui_nxv2f16_nxv2i1( %va) { ; CHECK-LABEL: vfptoui_nxv2f16_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -165,7 +165,7 @@ define @vfptosi_nxv2f16_nxv2i8( %va) { ; CHECK-LABEL: vfptosi_nxv2f16_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -176,7 +176,7 @@ define @vfptoui_nxv2f16_nxv2i8( %va) { ; CHECK-LABEL: vfptoui_nxv2f16_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -187,7 +187,7 @@ define @vfptosi_nxv2f16_nxv2i16( %va) { ; CHECK-LABEL: vfptosi_nxv2f16_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: ret %evec = fptosi %va to @@ -197,7 +197,7 @@ define @vfptoui_nxv2f16_nxv2i16( %va) { ; CHECK-LABEL: vfptoui_nxv2f16_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: ret %evec = fptoui %va to @@ -207,7 +207,7 @@ define @vfptosi_nxv2f16_nxv2i32( %va) { ; CHECK-LABEL: vfptosi_nxv2f16_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfwcvt.rtz.x.f.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -218,7 +218,7 @@ define @vfptoui_nxv2f16_nxv2i32( %va) { ; CHECK-LABEL: vfptoui_nxv2f16_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -229,9 +229,9 @@ define @vfptosi_nxv2f16_nxv2i64( %va) { ; CHECK-LABEL: vfptosi_nxv2f16_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: 
vfwcvt.rtz.x.f.v v8, v10 ; CHECK-NEXT: ret %evec = fptosi %va to @@ -241,9 +241,9 @@ define @vfptoui_nxv2f16_nxv2i64( %va) { ; CHECK-LABEL: vfptoui_nxv2f16_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v10 ; CHECK-NEXT: ret %evec = fptoui %va to @@ -253,7 +253,7 @@ define @vfptosi_nxv4f16_nxv4i1( %va) { ; CHECK-LABEL: vfptosi_nxv4f16_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -265,7 +265,7 @@ define @vfptoui_nxv4f16_nxv4i1( %va) { ; CHECK-LABEL: vfptoui_nxv4f16_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -277,7 +277,7 @@ define @vfptosi_nxv4f16_nxv4i8( %va) { ; CHECK-LABEL: vfptosi_nxv4f16_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -288,7 +288,7 @@ define @vfptoui_nxv4f16_nxv4i8( %va) { ; CHECK-LABEL: vfptoui_nxv4f16_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -299,7 +299,7 @@ define @vfptosi_nxv4f16_nxv4i16( %va) { ; CHECK-LABEL: vfptosi_nxv4f16_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: ret %evec = fptosi %va to @@ -309,7 +309,7 @@ define @vfptoui_nxv4f16_nxv4i16( %va) { ; CHECK-LABEL: vfptoui_nxv4f16_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: ret %evec = fptoui %va to @@ -319,7 +319,7 @@ define @vfptosi_nxv4f16_nxv4i32( %va) { ; CHECK-LABEL: vfptosi_nxv4f16_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfwcvt.rtz.x.f.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -330,7 +330,7 @@ define @vfptoui_nxv4f16_nxv4i32( %va) { ; CHECK-LABEL: vfptoui_nxv4f16_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -341,9 +341,9 @@ define @vfptosi_nxv4f16_nxv4i64( %va) { ; CHECK-LABEL: vfptosi_nxv4f16_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v12 ; CHECK-NEXT: ret %evec = fptosi %va to @@ -353,9 +353,9 @@ define @vfptoui_nxv4f16_nxv4i64( %va) { ; CHECK-LABEL: vfptoui_nxv4f16_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, 
zero, e16, m1, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v12 ; CHECK-NEXT: ret %evec = fptoui %va to @@ -365,7 +365,7 @@ define @vfptosi_nxv8f16_nxv8i1( %va) { ; CHECK-LABEL: vfptosi_nxv8f16_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 ; CHECK-NEXT: vand.vi v8, v10, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -377,7 +377,7 @@ define @vfptoui_nxv8f16_nxv8i1( %va) { ; CHECK-LABEL: vfptoui_nxv8f16_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 ; CHECK-NEXT: vand.vi v8, v10, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -389,7 +389,7 @@ define @vfptosi_nxv8f16_nxv8i8( %va) { ; CHECK-LABEL: vfptosi_nxv8f16_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -400,7 +400,7 @@ define @vfptoui_nxv8f16_nxv8i8( %va) { ; CHECK-LABEL: vfptoui_nxv8f16_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -411,7 +411,7 @@ define @vfptosi_nxv8f16_nxv8i16( %va) { ; CHECK-LABEL: vfptosi_nxv8f16_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: ret %evec = fptosi %va to @@ -421,7 +421,7 @@ define @vfptoui_nxv8f16_nxv8i16( %va) { ; CHECK-LABEL: vfptoui_nxv8f16_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: ret %evec = fptoui %va to @@ -431,7 +431,7 @@ define @vfptosi_nxv8f16_nxv8i32( %va) { ; CHECK-LABEL: vfptosi_nxv8f16_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfwcvt.rtz.x.f.v v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -442,7 +442,7 @@ define @vfptoui_nxv8f16_nxv8i32( %va) { ; CHECK-LABEL: vfptoui_nxv8f16_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -453,9 +453,9 @@ define @vfptosi_nxv8f16_nxv8i64( %va) { ; CHECK-LABEL: vfptosi_nxv8f16_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v16, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v16 ; CHECK-NEXT: ret %evec = fptosi %va to @@ -465,9 +465,9 @@ define @vfptoui_nxv8f16_nxv8i64( %va) { ; CHECK-LABEL: vfptoui_nxv8f16_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v16, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v16 ; CHECK-NEXT: ret %evec = fptoui %va to @@ -477,7 +477,7 @@ 
define @vfptosi_nxv16f16_nxv16i1( %va) {
; CHECK-LABEL: vfptosi_nxv16f16_nxv16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
; CHECK-NEXT: vand.vi v8, v12, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
@@ -489,7 +489,7 @@
define @vfptoui_nxv16f16_nxv16i1( %va) {
; CHECK-LABEL: vfptoui_nxv16f16_nxv16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
; CHECK-NEXT: vand.vi v8, v12, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
@@ -501,7 +501,7 @@
define @vfptosi_nxv16f16_nxv16i8( %va) {
; CHECK-LABEL: vfptosi_nxv16f16_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
@@ -512,7 +512,7 @@
define @vfptoui_nxv16f16_nxv16i8( %va) {
; CHECK-LABEL: vfptoui_nxv16f16_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
@@ -523,7 +523,7 @@
define @vfptosi_nxv16f16_nxv16i16( %va) {
; CHECK-LABEL: vfptosi_nxv16f16_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT: ret
%evec = fptosi %va to
@@ -533,7 +533,7 @@
define @vfptoui_nxv16f16_nxv16i16( %va) {
; CHECK-LABEL: vfptoui_nxv16f16_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8
; CHECK-NEXT: ret
%evec = fptoui %va to
@@ -543,7 +543,7 @@
define @vfptosi_nxv16f16_nxv16i32( %va) {
; CHECK-LABEL: vfptosi_nxv16f16_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT: vfwcvt.rtz.x.f.v v16, v8
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: ret
@@ -554,7 +554,7 @@
define @vfptoui_nxv16f16_nxv16i32( %va) {
; CHECK-LABEL: vfptoui_nxv16f16_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT: vfwcvt.rtz.xu.f.v v16, v8
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: ret
@@ -565,7 +565,7 @@
define @vfptosi_nxv32f16_nxv32i1( %va) {
; CHECK-LABEL: vfptosi_nxv32f16_nxv32i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8
; CHECK-NEXT: vand.vi v8, v16, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
@@ -577,7 +577,7 @@
define @vfptoui_nxv32f16_nxv32i1( %va) {
; CHECK-LABEL: vfptoui_nxv32f16_nxv32i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8
; CHECK-NEXT: vand.vi v8, v16, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
@@ -589,7 +589,7 @@
define @vfptosi_nxv32f16_nxv32i8( %va) {
; CHECK-LABEL: vfptosi_nxv32f16_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
@@ -600,7 +600,7 @@
define @vfptoui_nxv32f16_nxv32i8( %va) {
; CHECK-LABEL: vfptoui_nxv32f16_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -611,7 +611,7 @@ define @vfptosi_nxv32f16_nxv32i16( %va) { ; CHECK-LABEL: vfptosi_nxv32f16_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: ret %evec = fptosi %va to @@ -621,7 +621,7 @@ define @vfptoui_nxv32f16_nxv32i16( %va) { ; CHECK-LABEL: vfptoui_nxv32f16_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: ret %evec = fptoui %va to @@ -631,7 +631,7 @@ define @vfptosi_nxv1f32_nxv1i1( %va) { ; CHECK-LABEL: vfptosi_nxv1f32_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -643,7 +643,7 @@ define @vfptoui_nxv1f32_nxv1i1( %va) { ; CHECK-LABEL: vfptoui_nxv1f32_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -655,9 +655,9 @@ define @vfptosi_nxv1f32_nxv1i8( %va) { ; CHECK-LABEL: vfptosi_nxv1f32_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v9, 0 ; CHECK-NEXT: ret %evec = fptosi %va to @@ -667,9 +667,9 @@ define @vfptoui_nxv1f32_nxv1i8( %va) { ; CHECK-LABEL: vfptoui_nxv1f32_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v9, 0 ; CHECK-NEXT: ret %evec = fptoui %va to @@ -679,7 +679,7 @@ define @vfptosi_nxv1f32_nxv1i16( %va) { ; CHECK-LABEL: vfptosi_nxv1f32_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -690,7 +690,7 @@ define @vfptoui_nxv1f32_nxv1i16( %va) { ; CHECK-LABEL: vfptoui_nxv1f32_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -701,7 +701,7 @@ define @vfptosi_nxv1f32_nxv1i32( %va) { ; CHECK-LABEL: vfptosi_nxv1f32_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: ret %evec = fptosi %va to @@ -711,7 +711,7 @@ define @vfptoui_nxv1f32_nxv1i32( %va) { ; CHECK-LABEL: vfptoui_nxv1f32_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: ret %evec = fptoui %va to @@ -721,7 +721,7 @@ define @vfptosi_nxv1f32_nxv1i64( %va) { ; CHECK-LABEL: vfptosi_nxv1f32_nxv1i64: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwcvt.rtz.x.f.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -732,7 +732,7 @@ define @vfptoui_nxv1f32_nxv1i64( %va) { ; CHECK-LABEL: vfptoui_nxv1f32_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -743,7 +743,7 @@ define @vfptosi_nxv2f32_nxv2i1( %va) { ; CHECK-LABEL: vfptosi_nxv2f32_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -755,7 +755,7 @@ define @vfptoui_nxv2f32_nxv2i1( %va) { ; CHECK-LABEL: vfptoui_nxv2f32_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -767,9 +767,9 @@ define @vfptosi_nxv2f32_nxv2i8( %va) { ; CHECK-LABEL: vfptosi_nxv2f32_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v9, 0 ; CHECK-NEXT: ret %evec = fptosi %va to @@ -779,9 +779,9 @@ define @vfptoui_nxv2f32_nxv2i8( %va) { ; CHECK-LABEL: vfptoui_nxv2f32_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v9, 0 ; CHECK-NEXT: ret %evec = fptoui %va to @@ -791,7 +791,7 @@ define @vfptosi_nxv2f32_nxv2i16( %va) { ; CHECK-LABEL: vfptosi_nxv2f32_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -802,7 +802,7 @@ define @vfptoui_nxv2f32_nxv2i16( %va) { ; CHECK-LABEL: vfptoui_nxv2f32_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -813,7 +813,7 @@ define @vfptosi_nxv2f32_nxv2i32( %va) { ; CHECK-LABEL: vfptosi_nxv2f32_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: ret %evec = fptosi %va to @@ -823,7 +823,7 @@ define @vfptoui_nxv2f32_nxv2i32( %va) { ; CHECK-LABEL: vfptoui_nxv2f32_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: ret %evec = fptoui %va to @@ -833,7 +833,7 @@ define @vfptosi_nxv2f32_nxv2i64( %va) { ; CHECK-LABEL: vfptosi_nxv2f32_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.rtz.x.f.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -844,7 +844,7 @@ define @vfptoui_nxv2f32_nxv2i64( %va) { ; 
CHECK-LABEL: vfptoui_nxv2f32_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -855,7 +855,7 @@ define @vfptosi_nxv4f32_nxv4i1( %va) { ; CHECK-LABEL: vfptosi_nxv4f32_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 ; CHECK-NEXT: vand.vi v8, v10, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -867,7 +867,7 @@ define @vfptoui_nxv4f32_nxv4i1( %va) { ; CHECK-LABEL: vfptoui_nxv4f32_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 ; CHECK-NEXT: vand.vi v8, v10, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -879,9 +879,9 @@ define @vfptosi_nxv4f32_nxv4i8( %va) { ; CHECK-LABEL: vfptosi_nxv4f32_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: ret %evec = fptosi %va to @@ -891,9 +891,9 @@ define @vfptoui_nxv4f32_nxv4i8( %va) { ; CHECK-LABEL: vfptoui_nxv4f32_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: ret %evec = fptoui %va to @@ -903,7 +903,7 @@ define @vfptosi_nxv4f32_nxv4i16( %va) { ; CHECK-LABEL: vfptosi_nxv4f32_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -914,7 +914,7 @@ define @vfptoui_nxv4f32_nxv4i16( %va) { ; CHECK-LABEL: vfptoui_nxv4f32_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -925,7 +925,7 @@ define @vfptosi_nxv4f32_nxv4i32( %va) { ; CHECK-LABEL: vfptosi_nxv4f32_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: ret %evec = fptosi %va to @@ -935,7 +935,7 @@ define @vfptoui_nxv4f32_nxv4i32( %va) { ; CHECK-LABEL: vfptoui_nxv4f32_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: ret %evec = fptoui %va to @@ -945,7 +945,7 @@ define @vfptosi_nxv4f32_nxv4i64( %va) { ; CHECK-LABEL: vfptosi_nxv4f32_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwcvt.rtz.x.f.v v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -956,7 +956,7 @@ define @vfptoui_nxv4f32_nxv4i64( %va) { ; CHECK-LABEL: vfptoui_nxv4f32_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -967,7 
+967,7 @@ define @vfptosi_nxv8f32_nxv8i1( %va) { ; CHECK-LABEL: vfptosi_nxv8f32_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 ; CHECK-NEXT: vand.vi v8, v12, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -979,7 +979,7 @@ define @vfptoui_nxv8f32_nxv8i1( %va) { ; CHECK-LABEL: vfptoui_nxv8f32_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 ; CHECK-NEXT: vand.vi v8, v12, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -991,9 +991,9 @@ define @vfptosi_nxv8f32_nxv8i8( %va) { ; CHECK-LABEL: vfptosi_nxv8f32_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v12, 0 ; CHECK-NEXT: ret %evec = fptosi %va to @@ -1003,9 +1003,9 @@ define @vfptoui_nxv8f32_nxv8i8( %va) { ; CHECK-LABEL: vfptoui_nxv8f32_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v12, 0 ; CHECK-NEXT: ret %evec = fptoui %va to @@ -1015,7 +1015,7 @@ define @vfptosi_nxv8f32_nxv8i16( %va) { ; CHECK-LABEL: vfptosi_nxv8f32_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1026,7 +1026,7 @@ define @vfptoui_nxv8f32_nxv8i16( %va) { ; CHECK-LABEL: vfptoui_nxv8f32_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1037,7 +1037,7 @@ define @vfptosi_nxv8f32_nxv8i32( %va) { ; CHECK-LABEL: vfptosi_nxv8f32_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: ret %evec = fptosi %va to @@ -1047,7 +1047,7 @@ define @vfptoui_nxv8f32_nxv8i32( %va) { ; CHECK-LABEL: vfptoui_nxv8f32_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: ret %evec = fptoui %va to @@ -1057,7 +1057,7 @@ define @vfptosi_nxv8f32_nxv8i64( %va) { ; CHECK-LABEL: vfptosi_nxv8f32_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwcvt.rtz.x.f.v v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1068,7 +1068,7 @@ define @vfptoui_nxv8f32_nxv8i64( %va) { ; CHECK-LABEL: vfptoui_nxv8f32_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1079,7 +1079,7 @@ define @vfptosi_nxv16f32_nxv16i1( %va) { ; CHECK-LABEL: vfptosi_nxv16f32_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v16, 
v8 ; CHECK-NEXT: vand.vi v8, v16, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -1091,7 +1091,7 @@ define @vfptoui_nxv16f32_nxv16i1( %va) { ; CHECK-LABEL: vfptoui_nxv16f32_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 ; CHECK-NEXT: vand.vi v8, v16, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -1103,9 +1103,9 @@ define @vfptosi_nxv16f32_nxv16i8( %va) { ; CHECK-LABEL: vfptosi_nxv16f32_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 -; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v16, 0 ; CHECK-NEXT: ret %evec = fptosi %va to @@ -1115,9 +1115,9 @@ define @vfptoui_nxv16f32_nxv16i8( %va) { ; CHECK-LABEL: vfptoui_nxv16f32_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 -; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v16, 0 ; CHECK-NEXT: ret %evec = fptoui %va to @@ -1127,7 +1127,7 @@ define @vfptosi_nxv16f32_nxv16i16( %va) { ; CHECK-LABEL: vfptosi_nxv16f32_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1138,7 +1138,7 @@ define @vfptoui_nxv16f32_nxv16i16( %va) { ; CHECK-LABEL: vfptoui_nxv16f32_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1149,7 +1149,7 @@ define @vfptosi_nxv16f32_nxv16i32( %va) { ; CHECK-LABEL: vfptosi_nxv16f32_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: ret %evec = fptosi %va to @@ -1159,7 +1159,7 @@ define @vfptoui_nxv16f32_nxv16i32( %va) { ; CHECK-LABEL: vfptoui_nxv16f32_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: ret %evec = fptoui %va to @@ -1169,7 +1169,7 @@ define @vfptosi_nxv1f64_nxv1i1( %va) { ; CHECK-LABEL: vfptosi_nxv1f64_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -1181,7 +1181,7 @@ define @vfptoui_nxv1f64_nxv1i1( %va) { ; CHECK-LABEL: vfptoui_nxv1f64_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vand.vi v8, v9, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -1193,11 +1193,11 @@ define @vfptosi_nxv1f64_nxv1i8( %va) { ; CHECK-LABEL: vfptosi_nxv1f64_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v9, 0 -; CHECK-NEXT: vsetvli zero, zero, 
e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %evec = fptosi %va to @@ -1207,11 +1207,11 @@ define @vfptoui_nxv1f64_nxv1i8( %va) { ; CHECK-LABEL: vfptoui_nxv1f64_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v9, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %evec = fptoui %va to @@ -1221,9 +1221,9 @@ define @vfptosi_nxv1f64_nxv1i16( %va) { ; CHECK-LABEL: vfptosi_nxv1f64_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v9, 0 ; CHECK-NEXT: ret %evec = fptosi %va to @@ -1233,9 +1233,9 @@ define @vfptoui_nxv1f64_nxv1i16( %va) { ; CHECK-LABEL: vfptoui_nxv1f64_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v9, 0 ; CHECK-NEXT: ret %evec = fptoui %va to @@ -1245,7 +1245,7 @@ define @vfptosi_nxv1f64_nxv1i32( %va) { ; CHECK-LABEL: vfptosi_nxv1f64_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1256,7 +1256,7 @@ define @vfptoui_nxv1f64_nxv1i32( %va) { ; CHECK-LABEL: vfptoui_nxv1f64_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1267,7 +1267,7 @@ define @vfptosi_nxv1f64_nxv1i64( %va) { ; CHECK-LABEL: vfptosi_nxv1f64_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: ret %evec = fptosi %va to @@ -1277,7 +1277,7 @@ define @vfptoui_nxv1f64_nxv1i64( %va) { ; CHECK-LABEL: vfptoui_nxv1f64_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: ret %evec = fptoui %va to @@ -1287,7 +1287,7 @@ define @vfptosi_nxv2f64_nxv2i1( %va) { ; CHECK-LABEL: vfptosi_nxv2f64_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 ; CHECK-NEXT: vand.vi v8, v10, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -1299,7 +1299,7 @@ define @vfptoui_nxv2f64_nxv2i1( %va) { ; CHECK-LABEL: vfptoui_nxv2f64_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 ; CHECK-NEXT: vand.vi v8, v10, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -1311,11 +1311,11 @@ define @vfptosi_nxv2f64_nxv2i8( %va) { ; CHECK-LABEL: vfptosi_nxv2f64_nxv2i8: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %evec = fptosi %va to @@ -1325,11 +1325,11 @@ define @vfptoui_nxv2f64_nxv2i8( %va) { ; CHECK-LABEL: vfptoui_nxv2f64_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %evec = fptoui %va to @@ -1339,9 +1339,9 @@ define @vfptosi_nxv2f64_nxv2i16( %va) { ; CHECK-LABEL: vfptosi_nxv2f64_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: ret %evec = fptosi %va to @@ -1351,9 +1351,9 @@ define @vfptoui_nxv2f64_nxv2i16( %va) { ; CHECK-LABEL: vfptoui_nxv2f64_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: ret %evec = fptoui %va to @@ -1363,7 +1363,7 @@ define @vfptosi_nxv2f64_nxv2i32( %va) { ; CHECK-LABEL: vfptosi_nxv2f64_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1374,7 +1374,7 @@ define @vfptoui_nxv2f64_nxv2i32( %va) { ; CHECK-LABEL: vfptoui_nxv2f64_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1385,7 +1385,7 @@ define @vfptosi_nxv2f64_nxv2i64( %va) { ; CHECK-LABEL: vfptosi_nxv2f64_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: ret %evec = fptosi %va to @@ -1395,7 +1395,7 @@ define @vfptoui_nxv2f64_nxv2i64( %va) { ; CHECK-LABEL: vfptoui_nxv2f64_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: ret %evec = fptoui %va to @@ -1405,7 +1405,7 @@ define @vfptosi_nxv4f64_nxv4i1( %va) { ; CHECK-LABEL: vfptosi_nxv4f64_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 ; CHECK-NEXT: vand.vi v8, v12, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -1417,7 +1417,7 @@ define @vfptoui_nxv4f64_nxv4i1( %va) { ; CHECK-LABEL: vfptoui_nxv4f64_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli 
a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 ; CHECK-NEXT: vand.vi v8, v12, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 @@ -1429,11 +1429,11 @@ define @vfptosi_nxv4f64_nxv4i8( %va) { ; CHECK-LABEL: vfptosi_nxv4f64_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v12, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %evec = fptosi %va to @@ -1443,11 +1443,11 @@ define @vfptoui_nxv4f64_nxv4i8( %va) { ; CHECK-LABEL: vfptoui_nxv4f64_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v12, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %evec = fptoui %va to @@ -1457,9 +1457,9 @@ define @vfptosi_nxv4f64_nxv4i16( %va) { ; CHECK-LABEL: vfptosi_nxv4f64_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v12, 0 ; CHECK-NEXT: ret %evec = fptosi %va to @@ -1469,9 +1469,9 @@ define @vfptoui_nxv4f64_nxv4i16( %va) { ; CHECK-LABEL: vfptoui_nxv4f64_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v12, 0 ; CHECK-NEXT: ret %evec = fptoui %va to @@ -1481,7 +1481,7 @@ define @vfptosi_nxv4f64_nxv4i32( %va) { ; CHECK-LABEL: vfptosi_nxv4f64_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1492,7 +1492,7 @@ define @vfptoui_nxv4f64_nxv4i32( %va) { ; CHECK-LABEL: vfptoui_nxv4f64_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1503,7 +1503,7 @@ define @vfptosi_nxv4f64_nxv4i64( %va) { ; CHECK-LABEL: vfptosi_nxv4f64_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: ret %evec = fptosi %va to @@ -1513,7 +1513,7 @@ define @vfptoui_nxv4f64_nxv4i64( %va) { ; CHECK-LABEL: vfptoui_nxv4f64_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: ret %evec = fptoui %va to @@ -1523,7 +1523,7 @@ define @vfptosi_nxv8f64_nxv8i1( %va) { ; CHECK-LABEL: vfptosi_nxv8f64_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu 
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8
; CHECK-NEXT: vand.vi v8, v16, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
@@ -1535,7 +1535,7 @@
define @vfptoui_nxv8f64_nxv8i1( %va) {
; CHECK-LABEL: vfptoui_nxv8f64_nxv8i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8
; CHECK-NEXT: vand.vi v8, v16, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
@@ -1547,11 +1547,11 @@
define @vfptosi_nxv8f64_nxv8i8( %va) {
; CHECK-LABEL: vfptosi_nxv8f64_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vnsrl.wi v10, v16, 0
-; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: ret
%evec = fptosi %va to
@@ -1561,11 +1561,11 @@
define @vfptoui_nxv8f64_nxv8i8( %va) {
; CHECK-LABEL: vfptoui_nxv8f64_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vnsrl.wi v10, v16, 0
-; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: ret
%evec = fptoui %va to
@@ -1575,9 +1575,9 @@
define @vfptosi_nxv8f64_nxv8i16( %va) {
; CHECK-LABEL: vfptosi_nxv8f64_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v16, 0
; CHECK-NEXT: ret
%evec = fptosi %va to
@@ -1587,9 +1587,9 @@
define @vfptoui_nxv8f64_nxv8i16( %va) {
; CHECK-LABEL: vfptoui_nxv8f64_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v16, 0
; CHECK-NEXT: ret
%evec = fptoui %va to
@@ -1599,7 +1599,7 @@
define @vfptosi_nxv8f64_nxv8i32( %va) {
; CHECK-LABEL: vfptosi_nxv8f64_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
@@ -1610,7 +1610,7 @@
define @vfptoui_nxv8f64_nxv8i32( %va) {
; CHECK-LABEL: vfptoui_nxv8f64_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
@@ -1621,7 +1621,7 @@
define @vfptosi_nxv8f64_nxv8i64( %va) {
; CHECK-LABEL: vfptosi_nxv8f64_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8
; CHECK-NEXT: ret
%evec = fptosi %va to
@@ -1631,7 +1631,7 @@
define @vfptoui_nxv8f64_nxv8i64( %va) {
; CHECK-LABEL: vfptoui_nxv8f64_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: ret %evec = fptoui %va to diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp-mask.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp-mask.ll @@ -19,7 +19,7 @@ define @vfptosi_nxv2i1_nxv2f16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv2i1_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -44,7 +44,7 @@ define @vfptosi_nxv2i1_nxv2f32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv2i1_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -70,7 +70,7 @@ define @vfptosi_nxv2i1_nxv2f64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv2i1_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll @@ -31,7 +31,7 @@ define @vfptosi_nxv2i8_nxv2f16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv2i8_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -54,7 +54,7 @@ define @vfptosi_nxv2i16_nxv2f16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv2i16_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.fptosi.nxv2i16.nxv2f16( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) @@ -77,7 +77,7 @@ define @vfptosi_nxv2i32_nxv2f16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv2i32_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwcvt.rtz.x.f.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -102,9 +102,9 @@ define @vfptosi_nxv2i64_nxv2f16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv2i64_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v10 ; CHECK-NEXT: ret %v = call @llvm.vp.fptosi.nxv2i64.nxv2f16( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) @@ -128,9 +128,9 @@ define @vfptosi_nxv2i8_nxv2f32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv2i8_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 -; CHECK-NEXT: 
vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v9, 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fptosi.nxv2i8.nxv2f32( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) @@ -153,7 +153,7 @@ define @vfptosi_nxv2i16_nxv2f32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv2i16_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -176,7 +176,7 @@ define @vfptosi_nxv2i32_nxv2f32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv2i32_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.fptosi.nxv2i32.nxv2f32( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) @@ -199,7 +199,7 @@ define @vfptosi_nxv2i64_nxv2f32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv2i64_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.rtz.x.f.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -226,11 +226,11 @@ define @vfptosi_nxv2i8_nxv2f64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv2i8_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fptosi.nxv2i8.nxv2f64( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) @@ -254,9 +254,9 @@ define @vfptosi_nxv2i16_nxv2f64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv2i16_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fptosi.nxv2i16.nxv2f64( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) @@ -279,7 +279,7 @@ define @vfptosi_nxv2i32_nxv2f64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv2i32_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -302,7 +302,7 @@ define @vfptosi_nxv2i64_nxv2f64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptosi_nxv2i64_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.fptosi.nxv2i64.nxv2f64( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) @@ -325,7 +325,7 @@ ; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr 
a1, vlenb ; CHECK-NEXT: srli a4, a1, 2 -; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma ; CHECK-NEXT: slli a1, a1, 1 ; CHECK-NEXT: sub a3, a0, a1 ; CHECK-NEXT: vslidedown.vx v0, v0, a4 @@ -362,7 +362,7 @@ ; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a4, a1, 2 -; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma ; CHECK-NEXT: slli a1, a1, 1 ; CHECK-NEXT: sub a3, a0, a1 ; CHECK-NEXT: vslidedown.vx v0, v0, a4 @@ -395,14 +395,14 @@ ; CHECK-NEXT: mv a2, a1 ; CHECK-NEXT: .LBB27_2: ; CHECK-NEXT: li a3, 0 -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; CHECK-NEXT: sub a1, a0, a1 ; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8 ; CHECK-NEXT: bltu a0, a1, .LBB27_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: mv a3, a1 ; CHECK-NEXT: .LBB27_4: -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v16 ; CHECK-NEXT: ret %v = call @llvm.vp.fptosi.nxv32i32.nxv32f32( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp-mask.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp-mask.ll @@ -19,7 +19,7 @@ define @vfptoui_nxv2i1_nxv2f16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv2i1_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -44,7 +44,7 @@ define @vfptoui_nxv2i1_nxv2f32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv2i1_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -70,7 +70,7 @@ define @vfptoui_nxv2i1_nxv2f64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv2i1_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll @@ -31,7 +31,7 @@ define @vfptoui_nxv2i8_nxv2f16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv2i8_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -54,7 +54,7 @@ define @vfptoui_nxv2i16_nxv2f16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv2i16_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.fptoui.nxv2i16.nxv2f16( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) @@ -77,7 +77,7 @@ define @vfptoui_nxv2i32_nxv2f16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv2i32_nxv2f16_unmasked: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -102,9 +102,9 @@ define @vfptoui_nxv2i64_nxv2f16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv2i64_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v10 ; CHECK-NEXT: ret %v = call @llvm.vp.fptoui.nxv2i64.nxv2f16( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) @@ -128,9 +128,9 @@ define @vfptoui_nxv2i8_nxv2f32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv2i8_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v9, 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fptoui.nxv2i8.nxv2f32( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) @@ -153,7 +153,7 @@ define @vfptoui_nxv2i16_nxv2f32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv2i16_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -176,7 +176,7 @@ define @vfptoui_nxv2i32_nxv2f32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv2i32_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.fptoui.nxv2i32.nxv2f32( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) @@ -199,7 +199,7 @@ define @vfptoui_nxv2i64_nxv2f32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv2i64_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -226,11 +226,11 @@ define @vfptoui_nxv2i8_nxv2f64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv2i8_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fptoui.nxv2i8.nxv2f64( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) @@ -254,9 +254,9 @@ define @vfptoui_nxv2i16_nxv2f64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv2i16_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; 
CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: ret %v = call @llvm.vp.fptoui.nxv2i16.nxv2f64( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) @@ -279,7 +279,7 @@ define @vfptoui_nxv2i32_nxv2f64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv2i32_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -302,7 +302,7 @@ define @vfptoui_nxv2i64_nxv2f64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfptoui_nxv2i64_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.fptoui.nxv2i64.nxv2f64( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) @@ -325,7 +325,7 @@ ; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a4, a1, 2 -; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma ; CHECK-NEXT: slli a1, a1, 1 ; CHECK-NEXT: sub a3, a0, a1 ; CHECK-NEXT: vslidedown.vx v0, v0, a4 @@ -362,7 +362,7 @@ ; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a4, a1, 2 -; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma ; CHECK-NEXT: slli a1, a1, 1 ; CHECK-NEXT: sub a3, a0, a1 ; CHECK-NEXT: vslidedown.vx v0, v0, a4 @@ -395,14 +395,14 @@ ; CHECK-NEXT: mv a2, a1 ; CHECK-NEXT: .LBB27_2: ; CHECK-NEXT: li a3, 0 -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; CHECK-NEXT: sub a1, a0, a1 ; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; CHECK-NEXT: bltu a0, a1, .LBB27_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: mv a3, a1 ; CHECK-NEXT: .LBB27_4: -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; CHECK-NEXT: vfcvt.rtz.xu.f.v v16, v16 ; CHECK-NEXT: ret %v = call @llvm.vp.fptoui.nxv32i32.nxv32f32( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-sdnode.ll @@ -8,7 +8,7 @@ ; ; CHECK-LABEL: vfptrunc_nxv1f32_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -20,7 +20,7 @@ ; ; CHECK-LABEL: vfptrunc_nxv2f32_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -32,7 +32,7 @@ ; ; CHECK-LABEL: vfptrunc_nxv4f32_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -44,7 +44,7 @@ ; ; CHECK-LABEL: vfptrunc_nxv8f32_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -56,7 +56,7 @@ ; 
; CHECK-LABEL: vfptrunc_nxv16f32_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -68,9 +68,9 @@ ; ; CHECK-LABEL: vfptrunc_nxv1f64_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.rod.f.f.w v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v8, v9 ; CHECK-NEXT: ret %evec = fptrunc %va to @@ -81,7 +81,7 @@ ; ; CHECK-LABEL: vfptrunc_nxv1f64_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -93,9 +93,9 @@ ; ; CHECK-LABEL: vfptrunc_nxv2f64_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.rod.f.f.w v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v8, v10 ; CHECK-NEXT: ret %evec = fptrunc %va to @@ -106,7 +106,7 @@ ; ; CHECK-LABEL: vfptrunc_nxv2f64_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -118,9 +118,9 @@ ; ; CHECK-LABEL: vfptrunc_nxv4f64_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfncvt.rod.f.f.w v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v8, v12 ; CHECK-NEXT: ret %evec = fptrunc %va to @@ -131,7 +131,7 @@ ; ; CHECK-LABEL: vfptrunc_nxv4f64_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -143,9 +143,9 @@ ; ; CHECK-LABEL: vfptrunc_nxv8f64_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfncvt.rod.f.f.w v16, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v8, v16 ; CHECK-NEXT: ret %evec = fptrunc %va to @@ -156,7 +156,7 @@ ; ; CHECK-LABEL: vfptrunc_nxv8f64_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll @@ -18,7 +18,7 @@ define @vfptrunc_nxv2f16_nxv2f32_unmasked( %a, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_nxv2f16_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -43,9 +43,9 @@ define @vfptrunc_nxv2f16_nxv2f64_unmasked( %a, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_nxv2f16_nxv2f64_unmasked: ; CHECK: # 
%bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.rod.f.f.w v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v8, v10 ; CHECK-NEXT: ret %v = call @llvm.vp.fptrunc.nxv2f16.nxv2f64( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) @@ -68,7 +68,7 @@ define @vfptrunc_nxv2f32_nxv2f64_unmasked( %a, i32 zeroext %vl) { ; CHECK-LABEL: vfptrunc_nxv2f32_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -105,7 +105,7 @@ ; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a4, a1, 3 -; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma ; CHECK-NEXT: sub a3, a0, a1 ; CHECK-NEXT: vslidedown.vx v0, v0, a4 ; CHECK-NEXT: bltu a0, a3, .LBB7_2 @@ -157,7 +157,7 @@ ; CHECK-NEXT: mv a5, a4 ; CHECK-NEXT: .LBB8_2: ; CHECK-NEXT: li a6, 0 -; CHECK-NEXT: vsetvli a7, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a7, zero, e8, mf4, ta, ma ; CHECK-NEXT: sub a7, a5, a1 ; CHECK-NEXT: vslidedown.vx v0, v24, a3 ; CHECK-NEXT: bltu a5, a7, .LBB8_4 @@ -173,7 +173,7 @@ ; CHECK-NEXT: mv a5, a1 ; CHECK-NEXT: .LBB8_6: ; CHECK-NEXT: li a6, 0 -; CHECK-NEXT: vsetvli t1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli t1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v1, v24, a7 ; CHECK-NEXT: add a7, a0, t0 ; CHECK-NEXT: vsetvli zero, a5, e32, m4, ta, mu @@ -189,7 +189,7 @@ ; CHECK-NEXT: # %bb.7: ; CHECK-NEXT: mv a6, a4 ; CHECK-NEXT: .LBB8_8: -; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, ma ; CHECK-NEXT: vl8re64.v v16, (a7) ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: slli a2, a2, 3 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-vp.ll @@ -21,7 +21,7 @@ define @vfrdiv_vf_nxv1f16_unmasked( %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -49,7 +49,7 @@ define @vfrdiv_vf_nxv2f16_unmasked( %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -77,7 +77,7 @@ define @vfrdiv_vf_nxv4f16_unmasked( %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -105,7 +105,7 @@ define @vfrdiv_vf_nxv8f16_unmasked( %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ 
-133,7 +133,7 @@ define @vfrdiv_vf_nxv16f16_unmasked( %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -161,7 +161,7 @@ define @vfrdiv_vf_nxv32f16_unmasked( %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv32f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -189,7 +189,7 @@ define @vfrdiv_vf_nxv1f32_unmasked( %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -217,7 +217,7 @@ define @vfrdiv_vf_nxv2f32_unmasked( %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -245,7 +245,7 @@ define @vfrdiv_vf_nxv4f32_unmasked( %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -273,7 +273,7 @@ define @vfrdiv_vf_nxv8f32_unmasked( %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -301,7 +301,7 @@ define @vfrdiv_vf_nxv16f32_unmasked( %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -329,7 +329,7 @@ define @vfrdiv_vf_nxv1f64_unmasked( %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -357,7 +357,7 @@ define @vfrdiv_vf_nxv2f64_unmasked( %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -385,7 +385,7 @@ define @vfrdiv_vf_nxv4f64_unmasked( %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ 
-413,7 +413,7 @@ define @vfrdiv_vf_nxv8f64_unmasked( %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrdiv_vf_nxv8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll b/llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll @@ -12,7 +12,7 @@ define @intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -59,7 +59,7 @@ define @intrinsic_vfrdiv_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -106,7 +106,7 @@ define @intrinsic_vfrdiv_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -153,7 +153,7 @@ define @intrinsic_vfrdiv_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -200,7 +200,7 @@ define @intrinsic_vfrdiv_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -247,7 +247,7 @@ define @intrinsic_vfrdiv_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -294,7 +294,7 @@ define @intrinsic_vfrdiv_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -341,7 +341,7 @@ define @intrinsic_vfrdiv_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -388,7 +388,7 @@ define @intrinsic_vfrdiv_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: 
vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -435,7 +435,7 @@ define @intrinsic_vfrdiv_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -482,7 +482,7 @@ define @intrinsic_vfrdiv_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -529,7 +529,7 @@ define @intrinsic_vfrdiv_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -576,7 +576,7 @@ define @intrinsic_vfrdiv_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vfrdiv_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vfrdiv_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrec7.ll b/llvm/test/CodeGen/RISCV/rvv/vfrec7.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfrec7.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrec7.ll @@ -11,7 +11,7 @@ define @intrinsic_vfrec7_v_nxv1f16_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfrec7.v v8, v8 ; CHECK-NEXT: ret entry: @@ -54,7 +54,7 @@ define @intrinsic_vfrec7_v_nxv2f16_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfrec7.v v8, v8 ; CHECK-NEXT: ret entry: @@ -97,7 +97,7 @@ define @intrinsic_vfrec7_v_nxv4f16_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfrec7.v v8, v8 ; CHECK-NEXT: ret entry: @@ -140,7 +140,7 @@ define @intrinsic_vfrec7_v_nxv8f16_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli 
zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfrec7.v v8, v8 ; CHECK-NEXT: ret entry: @@ -183,7 +183,7 @@ define @intrinsic_vfrec7_v_nxv16f16_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfrec7.v v8, v8 ; CHECK-NEXT: ret entry: @@ -226,7 +226,7 @@ define @intrinsic_vfrec7_v_nxv32f16_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfrec7.v v8, v8 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ define @intrinsic_vfrec7_v_nxv1f32_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfrec7.v v8, v8 ; CHECK-NEXT: ret entry: @@ -312,7 +312,7 @@ define @intrinsic_vfrec7_v_nxv2f32_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfrec7.v v8, v8 ; CHECK-NEXT: ret entry: @@ -355,7 +355,7 @@ define @intrinsic_vfrec7_v_nxv4f32_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfrec7.v v8, v8 ; CHECK-NEXT: ret entry: @@ -398,7 +398,7 @@ define @intrinsic_vfrec7_v_nxv8f32_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfrec7.v v8, v8 ; CHECK-NEXT: ret entry: @@ -441,7 +441,7 @@ define @intrinsic_vfrec7_v_nxv16f32_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfrec7.v v8, v8 ; CHECK-NEXT: ret entry: @@ -484,7 +484,7 @@ define @intrinsic_vfrec7_v_nxv1f64_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfrec7.v v8, v8 ; CHECK-NEXT: ret entry: @@ -527,7 +527,7 @@ define @intrinsic_vfrec7_v_nxv2f64_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfrec7.v v8, v8 ; CHECK-NEXT: ret entry: @@ -570,7 +570,7 @@ define @intrinsic_vfrec7_v_nxv4f64_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfrec7.v v8, v8 ; CHECK-NEXT: ret entry: @@ -613,7 +613,7 @@ define @intrinsic_vfrec7_v_nxv8f64_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrec7_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, 
ta, ma ; CHECK-NEXT: vfrec7.v v8, v8 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmax.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmax.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfredmax.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfredmax.ll @@ -12,7 +12,7 @@ define @intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfredmax.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -35,7 +35,7 @@ define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfredmax.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define @intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfredmax.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -81,7 +81,7 @@ define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfredmax.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfredmax.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -127,7 +127,7 @@ define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfredmax.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -150,7 +150,7 @@ define @intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfredmax.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -173,7 +173,7 @@ define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfredmax.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -196,7 +196,7 @@ define @intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfredmax.vs v8, v12, v9 ; CHECK-NEXT: ret entry: 
@@ -219,7 +219,7 @@ define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfredmax.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +242,7 @@ define @intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vfredmax.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -265,7 +265,7 @@ define @intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vfredmax.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -288,7 +288,7 @@ define @intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfredmax.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -311,7 +311,7 @@ define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfredmax.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -334,7 +334,7 @@ define @intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfredmax.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -357,7 +357,7 @@ define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfredmax.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -380,7 +380,7 @@ define @intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfredmax.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -403,7 +403,7 @@ define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfredmax.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -426,7 +426,7 @@ define @intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfredmax.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -449,7 +449,7 @@ define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfredmax.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -472,7 +472,7 @@ define @intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vfredmax.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -495,7 +495,7 @@ define @intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vfredmax.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -518,7 +518,7 @@ define @intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfredmax.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -541,7 +541,7 @@ define @intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfredmax.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -564,7 +564,7 @@ define @intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfredmax.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -587,7 +587,7 @@ define @intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfredmax.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -610,7 +610,7 @@ define @intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfredmax.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -633,7 +633,7 @@ define @intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfredmax.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -656,7 +656,7 @@ define @intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, 
iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vfredmax.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -679,7 +679,7 @@ define @intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vfredmax.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmin.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmin.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfredmin.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfredmin.ll @@ -12,7 +12,7 @@ define @intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfredmin.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -35,7 +35,7 @@ define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfredmin.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define @intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfredmin.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -81,7 +81,7 @@ define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfredmin.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfredmin.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -127,7 +127,7 @@ define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfredmin.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -150,7 +150,7 @@ define @intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfredmin.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -173,7 +173,7 @@ define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfredmin.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -196,7 +196,7 @@ define @intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfredmin.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -219,7 +219,7 @@ define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfredmin.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +242,7 @@ define @intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vfredmin.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -265,7 +265,7 @@ define @intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vfredmin.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -288,7 +288,7 @@ define @intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfredmin.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -311,7 +311,7 @@ define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfredmin.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -334,7 +334,7 @@ define @intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfredmin.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -357,7 +357,7 @@ define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfredmin.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -380,7 +380,7 @@ define @intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfredmin.vs v8, v10, v9 ; CHECK-NEXT: ret 
entry: @@ -403,7 +403,7 @@ define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfredmin.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -426,7 +426,7 @@ define @intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfredmin.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -449,7 +449,7 @@ define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfredmin.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -472,7 +472,7 @@ define @intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vfredmin.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -495,7 +495,7 @@ define @intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vfredmin.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -518,7 +518,7 @@ define @intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfredmin.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -541,7 +541,7 @@ define @intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfredmin.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -564,7 +564,7 @@ define @intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfredmin.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -587,7 +587,7 @@ define @intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfredmin.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -610,7 +610,7 @@ define @intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfredmin.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -633,7 +633,7 @@ define @intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfredmin.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -656,7 +656,7 @@ define @intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vfredmin.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -679,7 +679,7 @@ define @intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vfredmin.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredosum.ll b/llvm/test/CodeGen/RISCV/rvv/vfredosum.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfredosum.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfredosum.ll @@ -12,7 +12,7 @@ define @intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfredosum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -35,7 +35,7 @@ define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfredosum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define @intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfredosum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -81,7 +81,7 @@ define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfredosum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfredosum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -127,7 +127,7 @@ define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; 
CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfredosum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -150,7 +150,7 @@ define @intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfredosum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -173,7 +173,7 @@ define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfredosum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -196,7 +196,7 @@ define @intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfredosum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -219,7 +219,7 @@ define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfredosum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +242,7 @@ define @intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vfredosum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -265,7 +265,7 @@ define @intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vfredosum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -288,7 +288,7 @@ define @intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfredosum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -311,7 +311,7 @@ define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfredosum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -334,7 +334,7 @@ define @intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfredosum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -357,7 +357,7 @@ define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, 
iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfredosum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -380,7 +380,7 @@ define @intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfredosum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -403,7 +403,7 @@ define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfredosum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -426,7 +426,7 @@ define @intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfredosum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -449,7 +449,7 @@ define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfredosum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -472,7 +472,7 @@ define @intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vfredosum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -495,7 +495,7 @@ define @intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vfredosum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -518,7 +518,7 @@ define @intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfredosum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -541,7 +541,7 @@ define @intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfredosum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -564,7 +564,7 @@ define @intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; 
CHECK-NEXT: vfredosum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -587,7 +587,7 @@ define @intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfredosum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -610,7 +610,7 @@ define @intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfredosum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -633,7 +633,7 @@ define @intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfredosum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -656,7 +656,7 @@ define @intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vfredosum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -679,7 +679,7 @@ define @intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vfredosum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredusum.ll b/llvm/test/CodeGen/RISCV/rvv/vfredusum.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfredusum.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfredusum.ll @@ -12,7 +12,7 @@ define @intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfredusum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -35,7 +35,7 @@ define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv1f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv1f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfredusum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define @intrinsic_vfredusum_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfredusum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -81,7 +81,7 @@ define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv2f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv2f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: 
vfredusum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vfredusum_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfredusum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -127,7 +127,7 @@ define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfredusum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -150,7 +150,7 @@ define @intrinsic_vfredusum_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfredusum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -173,7 +173,7 @@ define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv8f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv8f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfredusum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -196,7 +196,7 @@ define @intrinsic_vfredusum_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfredusum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -219,7 +219,7 @@ define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv16f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv16f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfredusum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +242,7 @@ define @intrinsic_vfredusum_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vfredusum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -265,7 +265,7 @@ define @intrinsic_vfredusum_mask_vs_nxv4f16_nxv32f16_nxv4f16( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv32f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vfredusum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -288,7 +288,7 @@ define @intrinsic_vfredusum_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfredusum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -311,7 +311,7 @@ define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv1f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vfredusum_mask_vs_nxv2f32_nxv1f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfredusum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -334,7 +334,7 @@ define @intrinsic_vfredusum_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfredusum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -357,7 +357,7 @@ define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfredusum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -380,7 +380,7 @@ define @intrinsic_vfredusum_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfredusum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -403,7 +403,7 @@ define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv4f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv4f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfredusum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -426,7 +426,7 @@ define @intrinsic_vfredusum_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfredusum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -449,7 +449,7 @@ define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv8f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv8f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfredusum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -472,7 +472,7 @@ define @intrinsic_vfredusum_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vfredusum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -495,7 +495,7 @@ define @intrinsic_vfredusum_mask_vs_nxv2f32_nxv16f32_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv16f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vfredusum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -518,7 +518,7 @@ define @intrinsic_vfredusum_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfredusum.vs v8, v9, 
v10 ; CHECK-NEXT: ret entry: @@ -541,7 +541,7 @@ define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfredusum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -564,7 +564,7 @@ define @intrinsic_vfredusum_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfredusum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -587,7 +587,7 @@ define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv2f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv2f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfredusum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -610,7 +610,7 @@ define @intrinsic_vfredusum_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfredusum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -633,7 +633,7 @@ define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv4f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv4f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfredusum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -656,7 +656,7 @@ define @intrinsic_vfredusum_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vfredusum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -679,7 +679,7 @@ define @intrinsic_vfredusum_mask_vs_nxv1f64_nxv8f64_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv8f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vfredusum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7.ll @@ -11,7 +11,7 @@ define @intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfrsqrt7.v v8, v8 ; CHECK-NEXT: ret entry: @@ -54,7 +54,7 @@ define @intrinsic_vfrsqrt7_v_nxv2f16_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfrsqrt7.v v8, v8 ; CHECK-NEXT: ret entry: @@ -97,7 +97,7 @@ define @intrinsic_vfrsqrt7_v_nxv4f16_nxv4f16( %0, iXLen %1) 
nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfrsqrt7.v v8, v8 ; CHECK-NEXT: ret entry: @@ -140,7 +140,7 @@ define @intrinsic_vfrsqrt7_v_nxv8f16_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfrsqrt7.v v8, v8 ; CHECK-NEXT: ret entry: @@ -183,7 +183,7 @@ define @intrinsic_vfrsqrt7_v_nxv16f16_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfrsqrt7.v v8, v8 ; CHECK-NEXT: ret entry: @@ -226,7 +226,7 @@ define @intrinsic_vfrsqrt7_v_nxv32f16_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfrsqrt7.v v8, v8 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ define @intrinsic_vfrsqrt7_v_nxv1f32_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfrsqrt7.v v8, v8 ; CHECK-NEXT: ret entry: @@ -312,7 +312,7 @@ define @intrinsic_vfrsqrt7_v_nxv2f32_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfrsqrt7.v v8, v8 ; CHECK-NEXT: ret entry: @@ -355,7 +355,7 @@ define @intrinsic_vfrsqrt7_v_nxv4f32_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfrsqrt7.v v8, v8 ; CHECK-NEXT: ret entry: @@ -398,7 +398,7 @@ define @intrinsic_vfrsqrt7_v_nxv8f32_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfrsqrt7.v v8, v8 ; CHECK-NEXT: ret entry: @@ -441,7 +441,7 @@ define @intrinsic_vfrsqrt7_v_nxv16f32_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfrsqrt7.v v8, v8 ; CHECK-NEXT: ret entry: @@ -484,7 +484,7 @@ define @intrinsic_vfrsqrt7_v_nxv1f64_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfrsqrt7.v v8, v8 ; CHECK-NEXT: ret entry: @@ -527,7 +527,7 @@ define @intrinsic_vfrsqrt7_v_nxv2f64_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfrsqrt7.v v8, v8 ; CHECK-NEXT: ret entry: @@ -570,7 +570,7 @@ define 
@intrinsic_vfrsqrt7_v_nxv4f64_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfrsqrt7.v v8, v8 ; CHECK-NEXT: ret entry: @@ -613,7 +613,7 @@ define @intrinsic_vfrsqrt7_v_nxv8f64_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfrsqrt7.v v8, v8 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfrsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub-vp.ll @@ -21,7 +21,7 @@ define @vfrsub_vf_nxv1f16_unmasked( %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -49,7 +49,7 @@ define @vfrsub_vf_nxv2f16_unmasked( %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -77,7 +77,7 @@ define @vfrsub_vf_nxv4f16_unmasked( %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -105,7 +105,7 @@ define @vfrsub_vf_nxv8f16_unmasked( %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -133,7 +133,7 @@ define @vfrsub_vf_nxv16f16_unmasked( %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -161,7 +161,7 @@ define @vfrsub_vf_nxv32f16_unmasked( %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv32f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -189,7 +189,7 @@ define @vfrsub_vf_nxv1f32_unmasked( %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -217,7 +217,7 @@ define @vfrsub_vf_nxv2f32_unmasked( %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: 
vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -245,7 +245,7 @@ define @vfrsub_vf_nxv4f32_unmasked( %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -273,7 +273,7 @@ define @vfrsub_vf_nxv8f32_unmasked( %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -301,7 +301,7 @@ define @vfrsub_vf_nxv16f32_unmasked( %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -329,7 +329,7 @@ define @vfrsub_vf_nxv1f64_unmasked( %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -357,7 +357,7 @@ define @vfrsub_vf_nxv2f64_unmasked( %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -385,7 +385,7 @@ define @vfrsub_vf_nxv4f64_unmasked( %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -413,7 +413,7 @@ define @vfrsub_vf_nxv8f64_unmasked( %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfrsub_vf_nxv8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfrsub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub.ll @@ -12,7 +12,7 @@ define @intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -59,7 +59,7 @@ define @intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -106,7 +106,7 @@ define @intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -153,7 +153,7 @@ define @intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -200,7 +200,7 @@ define @intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -247,7 +247,7 @@ define @intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -294,7 +294,7 @@ define @intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -341,7 +341,7 @@ define @intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -388,7 +388,7 @@ define @intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -435,7 +435,7 @@ define @intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -482,7 +482,7 @@ define @intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -529,7 +529,7 @@ define @intrinsic_vfrsub_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -576,7 +576,7 @@ define @intrinsic_vfrsub_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vfrsub_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vfrsub_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll @@ -12,7 +12,7 @@ define @intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -59,7 +59,7 @@ define @intrinsic_vfsgnj_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -106,7 +106,7 @@ define @intrinsic_vfsgnj_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -153,7 +153,7 @@ define @intrinsic_vfsgnj_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -200,7 +200,7 @@ define @intrinsic_vfsgnj_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -247,7 +247,7 @@ define @intrinsic_vfsgnj_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -295,7 +295,7 @@ define @intrinsic_vfsgnj_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -342,7 +342,7 @@ define @intrinsic_vfsgnj_vv_nxv2f32_nxv2f32_nxv2f32( %0, 
%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -389,7 +389,7 @@ define @intrinsic_vfsgnj_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -436,7 +436,7 @@ define @intrinsic_vfsgnj_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -483,7 +483,7 @@ define @intrinsic_vfsgnj_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -531,7 +531,7 @@ define @intrinsic_vfsgnj_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -578,7 +578,7 @@ define @intrinsic_vfsgnj_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -625,7 +625,7 @@ define @intrinsic_vfsgnj_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -672,7 +672,7 @@ define @intrinsic_vfsgnj_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfsgnj.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -720,7 +720,7 @@ define @intrinsic_vfsgnj_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -767,7 +767,7 @@ define @intrinsic_vfsgnj_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -814,7 +814,7 @@ define @intrinsic_vfsgnj_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfsgnj_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -861,7 +861,7 @@ define @intrinsic_vfsgnj_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -908,7 +908,7 @@ define @intrinsic_vfsgnj_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -955,7 +955,7 @@ define @intrinsic_vfsgnj_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1002,7 +1002,7 @@ define @intrinsic_vfsgnj_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1049,7 +1049,7 @@ define @intrinsic_vfsgnj_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1096,7 +1096,7 @@ define @intrinsic_vfsgnj_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1143,7 +1143,7 @@ define @intrinsic_vfsgnj_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1190,7 +1190,7 @@ define @intrinsic_vfsgnj_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1237,7 +1237,7 @@ define @intrinsic_vfsgnj_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1284,7 +1284,7 @@ define @intrinsic_vfsgnj_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1331,7 +1331,7 @@ define @intrinsic_vfsgnj_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1378,7 +1378,7 @@ define @intrinsic_vfsgnj_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll @@ -12,7 +12,7 @@ define @intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -59,7 +59,7 @@ define @intrinsic_vfsgnjn_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -106,7 +106,7 @@ define @intrinsic_vfsgnjn_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -153,7 +153,7 @@ define @intrinsic_vfsgnjn_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -200,7 +200,7 @@ define @intrinsic_vfsgnjn_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -247,7 +247,7 @@ define @intrinsic_vfsgnjn_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -295,7 +295,7 @@ define @intrinsic_vfsgnjn_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -342,7 +342,7 @@ define 
@intrinsic_vfsgnjn_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -389,7 +389,7 @@ define @intrinsic_vfsgnjn_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -436,7 +436,7 @@ define @intrinsic_vfsgnjn_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -483,7 +483,7 @@ define @intrinsic_vfsgnjn_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -531,7 +531,7 @@ define @intrinsic_vfsgnjn_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -578,7 +578,7 @@ define @intrinsic_vfsgnjn_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -625,7 +625,7 @@ define @intrinsic_vfsgnjn_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -672,7 +672,7 @@ define @intrinsic_vfsgnjn_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfsgnjn.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -720,7 +720,7 @@ define @intrinsic_vfsgnjn_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -767,7 +767,7 @@ define @intrinsic_vfsgnjn_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -814,7 +814,7 @@ define @intrinsic_vfsgnjn_vf_nxv4f16_nxv4f16_f16( 
%0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -861,7 +861,7 @@ define @intrinsic_vfsgnjn_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -908,7 +908,7 @@ define @intrinsic_vfsgnjn_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -955,7 +955,7 @@ define @intrinsic_vfsgnjn_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1002,7 +1002,7 @@ define @intrinsic_vfsgnjn_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1049,7 +1049,7 @@ define @intrinsic_vfsgnjn_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1096,7 +1096,7 @@ define @intrinsic_vfsgnjn_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1143,7 +1143,7 @@ define @intrinsic_vfsgnjn_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1190,7 +1190,7 @@ define @intrinsic_vfsgnjn_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1237,7 +1237,7 @@ define @intrinsic_vfsgnjn_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1284,7 +1284,7 @@ define @intrinsic_vfsgnjn_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1331,7 +1331,7 @@ define @intrinsic_vfsgnjn_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1378,7 +1378,7 @@ define @intrinsic_vfsgnjn_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll @@ -12,7 +12,7 @@ define @intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfsgnjx.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -59,7 +59,7 @@ define @intrinsic_vfsgnjx_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfsgnjx.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -106,7 +106,7 @@ define @intrinsic_vfsgnjx_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfsgnjx.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -153,7 +153,7 @@ define @intrinsic_vfsgnjx_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfsgnjx.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -200,7 +200,7 @@ define @intrinsic_vfsgnjx_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfsgnjx.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -247,7 +247,7 @@ define @intrinsic_vfsgnjx_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfsgnjx.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -295,7 +295,7 @@ define @intrinsic_vfsgnjx_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: 
vfsgnjx.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -342,7 +342,7 @@ define @intrinsic_vfsgnjx_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfsgnjx.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -389,7 +389,7 @@ define @intrinsic_vfsgnjx_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfsgnjx.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -436,7 +436,7 @@ define @intrinsic_vfsgnjx_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfsgnjx.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -483,7 +483,7 @@ define @intrinsic_vfsgnjx_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfsgnjx.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -531,7 +531,7 @@ define @intrinsic_vfsgnjx_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfsgnjx.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -578,7 +578,7 @@ define @intrinsic_vfsgnjx_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfsgnjx.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -625,7 +625,7 @@ define @intrinsic_vfsgnjx_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfsgnjx.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -672,7 +672,7 @@ define @intrinsic_vfsgnjx_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfsgnjx.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -720,7 +720,7 @@ define @intrinsic_vfsgnjx_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -767,7 +767,7 @@ define @intrinsic_vfsgnjx_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret 
entry: @@ -814,7 +814,7 @@ define @intrinsic_vfsgnjx_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -861,7 +861,7 @@ define @intrinsic_vfsgnjx_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -908,7 +908,7 @@ define @intrinsic_vfsgnjx_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -955,7 +955,7 @@ define @intrinsic_vfsgnjx_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1002,7 +1002,7 @@ define @intrinsic_vfsgnjx_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1049,7 +1049,7 @@ define @intrinsic_vfsgnjx_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1096,7 +1096,7 @@ define @intrinsic_vfsgnjx_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1143,7 +1143,7 @@ define @intrinsic_vfsgnjx_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1190,7 +1190,7 @@ define @intrinsic_vfsgnjx_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1237,7 +1237,7 @@ define @intrinsic_vfsgnjx_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1284,7 +1284,7 @@ define 
@intrinsic_vfsgnjx_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1331,7 +1331,7 @@ define @intrinsic_vfsgnjx_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1378,7 +1378,7 @@ define @intrinsic_vfsgnjx_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll @@ -12,7 +12,7 @@ define @intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -59,7 +59,7 @@ define @intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -106,7 +106,7 @@ define @intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -153,7 +153,7 @@ define @intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -200,7 +200,7 @@ define @intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -247,7 +247,7 @@ define @intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -294,7 +294,7 @@ define @intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -341,7 +341,7 @@ define @intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -388,7 +388,7 @@ define @intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -435,7 +435,7 @@ define @intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -482,7 +482,7 @@ define @intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -529,7 +529,7 @@ define @intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -576,7 +576,7 @@ define @intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll @@ -12,7 +12,7 @@ define @intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfslide1up.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -60,7 +60,7 @@ define @intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfslide1up.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -108,7 +108,7 @@ define @intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfslide1up.vf v9, v8, fa0 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -156,7 +156,7 @@ define @intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfslide1up.vf v10, v8, fa0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -204,7 +204,7 @@ define @intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfslide1up.vf v12, v8, fa0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -252,7 +252,7 @@ define @intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfslide1up.vf v16, v8, fa0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -300,7 +300,7 @@ define @intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfslide1up.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -348,7 +348,7 @@ define @intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfslide1up.vf v9, v8, fa0 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -396,7 +396,7 @@ define @intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfslide1up.vf v10, v8, fa0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -444,7 +444,7 @@ define @intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: 
vfslide1up.vf v12, v8, fa0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -492,7 +492,7 @@ define @intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfslide1up.vf v16, v8, fa0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -540,7 +540,7 @@ define @intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfslide1up.vf v9, v8, fa0 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -588,7 +588,7 @@ define @intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfslide1up.vf v10, v8, fa0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -636,7 +636,7 @@ define @intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfslide1up.vf v12, v8, fa0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -684,7 +684,7 @@ define @intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfslide1up.vf v16, v8, fa0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-sdnode.ll @@ -9,7 +9,7 @@ define @vfsqrt_nxv1f16( %v) { ; CHECK-LABEL: vfsqrt_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.sqrt.nxv1f16( %v) @@ -21,7 +21,7 @@ define @vfsqrt_nxv2f16( %v) { ; CHECK-LABEL: vfsqrt_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.sqrt.nxv2f16( %v) @@ -33,7 +33,7 @@ define @vfsqrt_nxv4f16( %v) { ; CHECK-LABEL: vfsqrt_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.sqrt.nxv4f16( %v) @@ -45,7 +45,7 @@ define @vfsqrt_nxv8f16( %v) { ; CHECK-LABEL: vfsqrt_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.sqrt.nxv8f16( %v) @@ -57,7 +57,7 @@ define @vfsqrt_nxv16f16( %v) { ; CHECK-LABEL: vfsqrt_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret 
%r = call @llvm.sqrt.nxv16f16( %v) @@ -69,7 +69,7 @@ define @vfsqrt_nxv32f16( %v) { ; CHECK-LABEL: vfsqrt_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.sqrt.nxv32f16( %v) @@ -81,7 +81,7 @@ define @vfsqrt_nxv1f32( %v) { ; CHECK-LABEL: vfsqrt_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.sqrt.nxv1f32( %v) @@ -93,7 +93,7 @@ define @vfsqrt_nxv2f32( %v) { ; CHECK-LABEL: vfsqrt_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.sqrt.nxv2f32( %v) @@ -105,7 +105,7 @@ define @vfsqrt_nxv4f32( %v) { ; CHECK-LABEL: vfsqrt_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.sqrt.nxv4f32( %v) @@ -117,7 +117,7 @@ define @vfsqrt_nxv8f32( %v) { ; CHECK-LABEL: vfsqrt_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.sqrt.nxv8f32( %v) @@ -129,7 +129,7 @@ define @vfsqrt_nxv16f32( %v) { ; CHECK-LABEL: vfsqrt_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.sqrt.nxv16f32( %v) @@ -141,7 +141,7 @@ define @vfsqrt_nxv1f64( %v) { ; CHECK-LABEL: vfsqrt_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.sqrt.nxv1f64( %v) @@ -153,7 +153,7 @@ define @vfsqrt_nxv2f64( %v) { ; CHECK-LABEL: vfsqrt_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.sqrt.nxv2f64( %v) @@ -165,7 +165,7 @@ define @vfsqrt_nxv4f64( %v) { ; CHECK-LABEL: vfsqrt_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.sqrt.nxv4f64( %v) @@ -177,7 +177,7 @@ define @vfsqrt_nxv8f64( %v) { ; CHECK-LABEL: vfsqrt_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %r = call @llvm.sqrt.nxv8f64( %v) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll @@ -19,7 +19,7 @@ define @vfsqrt_vv_nxv1f16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -43,7 +43,7 @@ define @vfsqrt_vv_nxv2f16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; 
CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -67,7 +67,7 @@ define @vfsqrt_vv_nxv4f16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -91,7 +91,7 @@ define @vfsqrt_vv_nxv8f16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -115,7 +115,7 @@ define @vfsqrt_vv_nxv16f16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -139,7 +139,7 @@ define @vfsqrt_vv_nxv32f16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv32f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -163,7 +163,7 @@ define @vfsqrt_vv_nxv1f32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -187,7 +187,7 @@ define @vfsqrt_vv_nxv2f32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -211,7 +211,7 @@ define @vfsqrt_vv_nxv4f32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -235,7 +235,7 @@ define @vfsqrt_vv_nxv8f32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -259,7 +259,7 @@ define @vfsqrt_vv_nxv16f32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -283,7 +283,7 @@ define @vfsqrt_vv_nxv1f64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -307,7 +307,7 @@ define @vfsqrt_vv_nxv2f64_unmasked( %va, i32 zeroext %evl) { 
; CHECK-LABEL: vfsqrt_vv_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -331,7 +331,7 @@ define @vfsqrt_vv_nxv4f64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -355,7 +355,7 @@ define @vfsqrt_vv_nxv7f64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv7f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -379,7 +379,7 @@ define @vfsqrt_vv_nxv8f64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vfsqrt_vv_nxv8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -398,7 +398,7 @@ ; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a4, a1, 3 -; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma ; CHECK-NEXT: sub a3, a0, a1 ; CHECK-NEXT: vslidedown.vx v0, v0, a4 ; CHECK-NEXT: bltu a0, a3, .LBB32_2 @@ -429,14 +429,14 @@ ; CHECK-NEXT: mv a2, a1 ; CHECK-NEXT: .LBB33_2: ; CHECK-NEXT: li a3, 0 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: sub a1, a0, a1 ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: bltu a0, a1, .LBB33_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: mv a3, a1 ; CHECK-NEXT: .LBB33_4: -; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; CHECK-NEXT: vfsqrt.v v16, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt.ll @@ -11,7 +11,7 @@ define @intrinsic_vfsqrt_v_nxv1f16_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret entry: @@ -54,7 +54,7 @@ define @intrinsic_vfsqrt_v_nxv2f16_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret entry: @@ -97,7 +97,7 @@ define @intrinsic_vfsqrt_v_nxv4f16_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret entry: @@ -140,7 +140,7 @@ define @intrinsic_vfsqrt_v_nxv8f16_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret 
entry: @@ -183,7 +183,7 @@ define @intrinsic_vfsqrt_v_nxv16f16_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret entry: @@ -226,7 +226,7 @@ define @intrinsic_vfsqrt_v_nxv32f16_nxv32f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ define @intrinsic_vfsqrt_v_nxv1f32_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret entry: @@ -312,7 +312,7 @@ define @intrinsic_vfsqrt_v_nxv2f32_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret entry: @@ -355,7 +355,7 @@ define @intrinsic_vfsqrt_v_nxv4f32_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret entry: @@ -398,7 +398,7 @@ define @intrinsic_vfsqrt_v_nxv8f32_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret entry: @@ -441,7 +441,7 @@ define @intrinsic_vfsqrt_v_nxv16f32_nxv16f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret entry: @@ -484,7 +484,7 @@ define @intrinsic_vfsqrt_v_nxv1f64_nxv1f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret entry: @@ -527,7 +527,7 @@ define @intrinsic_vfsqrt_v_nxv2f64_nxv2f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret entry: @@ -570,7 +570,7 @@ define @intrinsic_vfsqrt_v_nxv4f64_nxv4f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret entry: @@ -613,7 +613,7 @@ define @intrinsic_vfsqrt_v_nxv8f64_nxv8f64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfsqrt_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfsqrt.v v8, v8 ; CHECK-NEXT: ret entry: diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vfsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsub-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-sdnode.ll @@ -7,7 +7,7 @@ define @vfsub_vv_nxv1f16( %va, %vb) { ; CHECK-LABEL: vfsub_vv_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = fsub %va, %vb @@ -17,7 +17,7 @@ define @vfsub_vf_nxv1f16( %va, half %b) { ; CHECK-LABEL: vfsub_vf_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -29,7 +29,7 @@ define @vfsub_vv_nxv2f16( %va, %vb) { ; CHECK-LABEL: vfsub_vv_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = fsub %va, %vb @@ -39,7 +39,7 @@ define @vfsub_vf_nxv2f16( %va, half %b) { ; CHECK-LABEL: vfsub_vf_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -51,7 +51,7 @@ define @vfsub_vv_nxv4f16( %va, %vb) { ; CHECK-LABEL: vfsub_vv_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = fsub %va, %vb @@ -61,7 +61,7 @@ define @vfsub_vf_nxv4f16( %va, half %b) { ; CHECK-LABEL: vfsub_vf_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -73,7 +73,7 @@ define @vfsub_vv_nxv8f16( %va, %vb) { ; CHECK-LABEL: vfsub_vv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = fsub %va, %vb @@ -83,7 +83,7 @@ define @vfsub_vf_nxv8f16( %va, half %b) { ; CHECK-LABEL: vfsub_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -95,7 +95,7 @@ define @vfsub_fv_nxv8f16( %va, half %b) { ; CHECK-LABEL: vfsub_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -107,7 +107,7 @@ define @vfsub_vv_nxv16f16( %va, %vb) { ; CHECK-LABEL: vfsub_vv_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = fsub %va, %vb @@ -117,7 +117,7 @@ define @vfsub_vf_nxv16f16( %va, half %b) { ; CHECK-LABEL: vfsub_vf_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -129,7 +129,7 @@ define @vfsub_vv_nxv32f16( %va, %vb) { ; CHECK-LABEL: vfsub_vv_nxv32f16: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = fsub %va, %vb @@ -139,7 +139,7 @@ define @vfsub_vf_nxv32f16( %va, half %b) { ; CHECK-LABEL: vfsub_vf_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -151,7 +151,7 @@ define @vfsub_vv_nxv1f32( %va, %vb) { ; CHECK-LABEL: vfsub_vv_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = fsub %va, %vb @@ -161,7 +161,7 @@ define @vfsub_vf_nxv1f32( %va, float %b) { ; CHECK-LABEL: vfsub_vf_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -173,7 +173,7 @@ define @vfsub_vv_nxv2f32( %va, %vb) { ; CHECK-LABEL: vfsub_vv_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = fsub %va, %vb @@ -183,7 +183,7 @@ define @vfsub_vf_nxv2f32( %va, float %b) { ; CHECK-LABEL: vfsub_vf_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -195,7 +195,7 @@ define @vfsub_vv_nxv4f32( %va, %vb) { ; CHECK-LABEL: vfsub_vv_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = fsub %va, %vb @@ -205,7 +205,7 @@ define @vfsub_vf_nxv4f32( %va, float %b) { ; CHECK-LABEL: vfsub_vf_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -217,7 +217,7 @@ define @vfsub_vv_nxv8f32( %va, %vb) { ; CHECK-LABEL: vfsub_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = fsub %va, %vb @@ -227,7 +227,7 @@ define @vfsub_vf_nxv8f32( %va, float %b) { ; CHECK-LABEL: vfsub_vf_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -239,7 +239,7 @@ define @vfsub_fv_nxv8f32( %va, float %b) { ; CHECK-LABEL: vfsub_fv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -251,7 +251,7 @@ define @vfsub_vv_nxv16f32( %va, %vb) { ; CHECK-LABEL: vfsub_vv_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = fsub %va, %vb @@ -261,7 +261,7 @@ define @vfsub_vf_nxv16f32( %va, float %b) { ; CHECK-LABEL: vfsub_vf_nxv16f32: ; CHECK: # %bb.0: 
-; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -273,7 +273,7 @@ define @vfsub_vv_nxv1f64( %va, %vb) { ; CHECK-LABEL: vfsub_vv_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = fsub %va, %vb @@ -283,7 +283,7 @@ define @vfsub_vf_nxv1f64( %va, double %b) { ; CHECK-LABEL: vfsub_vf_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -295,7 +295,7 @@ define @vfsub_vv_nxv2f64( %va, %vb) { ; CHECK-LABEL: vfsub_vv_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = fsub %va, %vb @@ -305,7 +305,7 @@ define @vfsub_vf_nxv2f64( %va, double %b) { ; CHECK-LABEL: vfsub_vf_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -317,7 +317,7 @@ define @vfsub_vv_nxv4f64( %va, %vb) { ; CHECK-LABEL: vfsub_vv_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = fsub %va, %vb @@ -327,7 +327,7 @@ define @vfsub_vf_nxv4f64( %va, double %b) { ; CHECK-LABEL: vfsub_vf_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -339,7 +339,7 @@ define @vfsub_vv_nxv8f64( %va, %vb) { ; CHECK-LABEL: vfsub_vv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = fsub %va, %vb @@ -349,7 +349,7 @@ define @vfsub_vf_nxv8f64( %va, double %b) { ; CHECK-LABEL: vfsub_vf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -361,7 +361,7 @@ define @vfsub_fv_nxv8f64( %va, double %b) { ; CHECK-LABEL: vfsub_fv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfrsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll @@ -19,7 +19,7 @@ define @vfsub_vv_nxv1f16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv1f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -43,7 +43,7 @@ define @vfsub_vf_nxv1f16_unmasked( %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_nxv1f16_unmasked: ; CHECK: # %bb.0: 
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -69,7 +69,7 @@ define @vfsub_vv_nxv2f16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -93,7 +93,7 @@ define @vfsub_vf_nxv2f16_unmasked( %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_nxv2f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -119,7 +119,7 @@ define @vfsub_vv_nxv4f16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -143,7 +143,7 @@ define @vfsub_vf_nxv4f16_unmasked( %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_nxv4f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -169,7 +169,7 @@ define @vfsub_vv_nxv8f16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -193,7 +193,7 @@ define @vfsub_vf_nxv8f16_unmasked( %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_nxv8f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -219,7 +219,7 @@ define @vfsub_vv_nxv16f16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -243,7 +243,7 @@ define @vfsub_vf_nxv16f16_unmasked( %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_nxv16f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -269,7 +269,7 @@ define @vfsub_vv_nxv32f16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv32f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -293,7 +293,7 @@ define @vfsub_vf_nxv32f16_unmasked( %va, half %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_nxv32f16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; 
CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %b, i32 0 @@ -319,7 +319,7 @@ define @vfsub_vv_nxv1f32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -343,7 +343,7 @@ define @vfsub_vf_nxv1f32_unmasked( %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_nxv1f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -369,7 +369,7 @@ define @vfsub_vv_nxv2f32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -393,7 +393,7 @@ define @vfsub_vf_nxv2f32_unmasked( %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_nxv2f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -419,7 +419,7 @@ define @vfsub_vv_nxv4f32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -443,7 +443,7 @@ define @vfsub_vf_nxv4f32_unmasked( %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_nxv4f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -469,7 +469,7 @@ define @vfsub_vv_nxv8f32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -493,7 +493,7 @@ define @vfsub_vf_nxv8f32_unmasked( %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_nxv8f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, i32 0 @@ -519,7 +519,7 @@ define @vfsub_vv_nxv16f32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -543,7 +543,7 @@ define @vfsub_vf_nxv16f32_unmasked( %va, float %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_nxv16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %b, 
i32 0 @@ -569,7 +569,7 @@ define @vfsub_vv_nxv1f64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -593,7 +593,7 @@ define @vfsub_vf_nxv1f64_unmasked( %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_nxv1f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -619,7 +619,7 @@ define @vfsub_vv_nxv2f64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -643,7 +643,7 @@ define @vfsub_vf_nxv2f64_unmasked( %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_nxv2f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -669,7 +669,7 @@ define @vfsub_vv_nxv4f64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -693,7 +693,7 @@ define @vfsub_vf_nxv4f64_unmasked( %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_nxv4f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 @@ -731,7 +731,7 @@ define @vfsub_vv_nxv8f64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vv_nxv8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -755,7 +755,7 @@ define @vfsub_vf_nxv8f64_unmasked( %va, double %b, i32 zeroext %evl) { ; CHECK-LABEL: vfsub_vf_nxv8f64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsub.ll @@ -12,7 +12,7 @@ define @intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -59,7 +59,7 @@ define @intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli 
zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -106,7 +106,7 @@ define @intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -153,7 +153,7 @@ define @intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -200,7 +200,7 @@ define @intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -247,7 +247,7 @@ define @intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -295,7 +295,7 @@ define @intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -342,7 +342,7 @@ define @intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -389,7 +389,7 @@ define @intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -436,7 +436,7 @@ define @intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -483,7 +483,7 @@ define @intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -531,7 +531,7 @@ define @intrinsic_vfsub_vv_nxv1f64_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ 
-578,7 +578,7 @@ define @intrinsic_vfsub_vv_nxv2f64_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -625,7 +625,7 @@ define @intrinsic_vfsub_vv_nxv4f64_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -672,7 +672,7 @@ define @intrinsic_vfsub_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfsub.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -720,7 +720,7 @@ define @intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -767,7 +767,7 @@ define @intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -814,7 +814,7 @@ define @intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -861,7 +861,7 @@ define @intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -908,7 +908,7 @@ define @intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -955,7 +955,7 @@ define @intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1002,7 +1002,7 @@ define @intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1049,7 +1049,7 @@ define @intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1096,7 +1096,7 @@ define @intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1143,7 +1143,7 @@ define @intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1190,7 +1190,7 @@ define @intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1237,7 +1237,7 @@ define @intrinsic_vfsub_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f64_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1284,7 +1284,7 @@ define @intrinsic_vfsub_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f64_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1331,7 +1331,7 @@ define @intrinsic_vfsub_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f64_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1378,7 +1378,7 @@ define @intrinsic_vfsub_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f64_nxv8f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vfsub.vf v8, v8, fa0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-sdnode.ll @@ -7,7 +7,7 @@ define @vfwadd_vv_nxv1f64( %va, %vb) { ; CHECK-LABEL: vfwadd_vv_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwadd.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -20,7 +20,7 @@ define @vfwadd_vf_nxv1f64( %va, float %b) { ; CHECK-LABEL: vfwadd_vf_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwadd.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -35,7 +35,7 @@ define @vfwadd_vf_nxv1f64_2( %va, float %b) { ; CHECK-LABEL: 
vfwadd_vf_nxv1f64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwadd.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -50,7 +50,7 @@ define @vfwadd_wv_nxv1f64( %va, %vb) { ; CHECK-LABEL: vfwadd_wv_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwadd.wv v8, v8, v9 ; CHECK-NEXT: ret %vc = fpext %vb to @@ -61,7 +61,7 @@ define @vfwadd_wf_nxv1f64( %va, float %b) { ; CHECK-LABEL: vfwadd_wf_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -74,7 +74,7 @@ define @vfwadd_wf_nxv1f64_2( %va, float %b) { ; CHECK-LABEL: vfwadd_wf_nxv1f64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret %fpext = fpext float %b to double @@ -87,7 +87,7 @@ define @vfwadd_vv_nxv2f64( %va, %vb) { ; CHECK-LABEL: vfwadd_vv_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwadd.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -100,7 +100,7 @@ define @vfwadd_vf_nxv2f64( %va, float %b) { ; CHECK-LABEL: vfwadd_vf_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwadd.vf v10, v8, fa0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -115,7 +115,7 @@ define @vfwadd_vf_nxv2f64_2( %va, float %b) { ; CHECK-LABEL: vfwadd_vf_nxv2f64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwadd.vf v10, v8, fa0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -130,7 +130,7 @@ define @vfwadd_wv_nxv2f64( %va, %vb) { ; CHECK-LABEL: vfwadd_wv_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwadd.wv v8, v8, v10 ; CHECK-NEXT: ret %vc = fpext %vb to @@ -141,7 +141,7 @@ define @vfwadd_wf_nxv2f64( %va, float %b) { ; CHECK-LABEL: vfwadd_wf_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -154,7 +154,7 @@ define @vfwadd_wf_nxv2f64_2( %va, float %b) { ; CHECK-LABEL: vfwadd_wf_nxv2f64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret %fpext = fpext float %b to double @@ -167,7 +167,7 @@ define @vfwadd_vv_nxv4f64( %va, %vb) { ; CHECK-LABEL: vfwadd_vv_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwadd.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -180,7 +180,7 @@ define @vfwadd_vf_nxv4f64( %va, float %b) { ; CHECK-LABEL: vfwadd_vf_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwadd.vf v12, v8, fa0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -195,7 +195,7 @@ define 
@vfwadd_vf_nxv4f64_2( %va, float %b) { ; CHECK-LABEL: vfwadd_vf_nxv4f64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwadd.vf v12, v8, fa0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -210,7 +210,7 @@ define @vfwadd_wv_nxv4f64( %va, %vb) { ; CHECK-LABEL: vfwadd_wv_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwadd.wv v8, v8, v12 ; CHECK-NEXT: ret %vc = fpext %vb to @@ -221,7 +221,7 @@ define @vfwadd_wf_nxv4f64( %va, float %b) { ; CHECK-LABEL: vfwadd_wf_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -234,7 +234,7 @@ define @vfwadd_wf_nxv4f64_2( %va, float %b) { ; CHECK-LABEL: vfwadd_wf_nxv4f64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret %fpext = fpext float %b to double @@ -247,7 +247,7 @@ define @vfwadd_vv_nxv8f64( %va, %vb) { ; CHECK-LABEL: vfwadd_vv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwadd.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -260,7 +260,7 @@ define @vfwadd_vf_nxv8f64( %va, float %b) { ; CHECK-LABEL: vfwadd_vf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwadd.vf v16, v8, fa0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -275,7 +275,7 @@ define @vfwadd_vf_nxv8f64_2( %va, float %b) { ; CHECK-LABEL: vfwadd_vf_nxv8f64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwadd.vf v16, v8, fa0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -290,7 +290,7 @@ define @vfwadd_wv_nxv8f64( %va, %vb) { ; CHECK-LABEL: vfwadd_wv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwadd.wv v8, v8, v16 ; CHECK-NEXT: ret %vc = fpext %vb to @@ -301,7 +301,7 @@ define @vfwadd_wf_nxv8f64( %va, float %b) { ; CHECK-LABEL: vfwadd_wf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -314,7 +314,7 @@ define @vfwadd_wf_nxv8f64_2( %va, float %b) { ; CHECK-LABEL: vfwadd_wf_nxv8f64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret %fpext = fpext float %b to double diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.ll @@ -12,7 +12,7 @@ define @intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfwadd.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -60,7 +60,7 @@ define 
@intrinsic_vfwadd_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwadd.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -108,7 +108,7 @@ define @intrinsic_vfwadd_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfwadd.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -156,7 +156,7 @@ define @intrinsic_vfwadd_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfwadd.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -204,7 +204,7 @@ define @intrinsic_vfwadd_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfwadd.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -252,7 +252,7 @@ define @intrinsic_vfwadd_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfwadd.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -300,7 +300,7 @@ define @intrinsic_vfwadd_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfwadd.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -348,7 +348,7 @@ define @intrinsic_vfwadd_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfwadd.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -396,7 +396,7 @@ define @intrinsic_vfwadd_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfwadd.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -444,7 +444,7 @@ define @intrinsic_vfwadd_vf_nxv1f32_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfwadd.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -492,7 +492,7 @@ define @intrinsic_vfwadd_vf_nxv2f32_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, 
ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwadd.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -540,7 +540,7 @@ define @intrinsic_vfwadd_vf_nxv4f32_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfwadd.vf v10, v8, fa0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -588,7 +588,7 @@ define @intrinsic_vfwadd_vf_nxv8f32_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfwadd.vf v12, v8, fa0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -636,7 +636,7 @@ define @intrinsic_vfwadd_vf_nxv16f32_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfwadd.vf v16, v8, fa0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -684,7 +684,7 @@ define @intrinsic_vfwadd_vf_nxv1f64_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfwadd.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -732,7 +732,7 @@ define @intrinsic_vfwadd_vf_nxv2f64_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfwadd.vf v10, v8, fa0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -780,7 +780,7 @@ define @intrinsic_vfwadd_vf_nxv4f64_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfwadd.vf v12, v8, fa0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -828,7 +828,7 @@ define @intrinsic_vfwadd_vf_nxv8f64_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfwadd.vf v16, v8, fa0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll @@ -12,7 +12,7 @@ define @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfwadd.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -59,7 +59,7 @@ define @intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, 
ta, ma ; CHECK-NEXT: vfwadd.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -106,7 +106,7 @@ define @intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfwadd.wv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -153,7 +153,7 @@ define @intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfwadd.wv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -200,7 +200,7 @@ define @intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfwadd.wv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -248,7 +248,7 @@ define @intrinsic_vfwadd.w_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f64_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfwadd.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -295,7 +295,7 @@ define @intrinsic_vfwadd.w_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f64_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfwadd.wv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -342,7 +342,7 @@ define @intrinsic_vfwadd.w_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f64_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfwadd.wv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -389,7 +389,7 @@ define @intrinsic_vfwadd.w_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f64_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfwadd.wv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -437,7 +437,7 @@ define @intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -484,7 +484,7 @@ define @intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -531,7 +531,7 @@ define @intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfwadd.wf 
v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -578,7 +578,7 @@ define @intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -625,7 +625,7 @@ define @intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -672,7 +672,7 @@ define @intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -719,7 +719,7 @@ define @intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -766,7 +766,7 @@ define @intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -813,7 +813,7 @@ define @intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfwadd.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1160,7 +1160,7 @@ define @intrinsic_vfwadd.w_wv_untie_nxv1f32_nxv1f32_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv1f32_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfwadd.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1177,7 +1177,7 @@ define @intrinsic_vfwadd.w_wv_untie_nxv2f32_nxv2f32_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv2f32_nxv2f32_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwadd.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1194,7 +1194,7 @@ define @intrinsic_vfwadd.w_wv_untie_nxv4f32_nxv4f32_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv4f32_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfwadd.wv v12, v10, v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -1211,7 +1211,7 @@ define @intrinsic_vfwadd.w_wv_untie_nxv8f32_nxv8f32_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv8f32_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, 
e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfwadd.wv v16, v12, v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -1228,7 +1228,7 @@ define @intrinsic_vfwadd.w_wv_untie_nxv1f64_nxv1f64_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv1f64_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfwadd.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1245,7 +1245,7 @@ define @intrinsic_vfwadd.w_wv_untie_nxv2f64_nxv2f64_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv2f64_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfwadd.wv v12, v10, v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -1262,7 +1262,7 @@ define @intrinsic_vfwadd.w_wv_untie_nxv4f64_nxv4f64_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv4f64_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfwadd.wv v16, v12, v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -1279,7 +1279,7 @@ define @intrinsic_vfwadd.w_wv_untie_nxv8f64_nxv8f64_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv8f64_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfwadd.wv v24, v16, v8 ; CHECK-NEXT: vmv8r.v v8, v24 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll @@ -11,7 +11,7 @@ define @intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -55,7 +55,7 @@ define @intrinsic_vfwcvt_f.f.v_nxv2f32_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv2f32_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -99,7 +99,7 @@ define @intrinsic_vfwcvt_f.f.v_nxv4f32_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -143,7 +143,7 @@ define @intrinsic_vfwcvt_f.f.v_nxv8f32_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -187,7 +187,7 @@ define @intrinsic_vfwcvt_f.f.v_nxv16f32_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; 
CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -231,7 +231,7 @@ define @intrinsic_vfwcvt_f.f.v_nxv1f64_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -275,7 +275,7 @@ define @intrinsic_vfwcvt_f.f.v_nxv2f64_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -319,7 +319,7 @@ define @intrinsic_vfwcvt_f.f.v_nxv4f64_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -363,7 +363,7 @@ define @intrinsic_vfwcvt_f.f.v_nxv8f64_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfwcvt.f.f.v v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x.ll @@ -11,7 +11,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -55,7 +55,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv2f16_nxv2i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -99,7 +99,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv4f16_nxv4i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -143,7 +143,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv8f16_nxv8i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -187,7 +187,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv16f16_nxv16i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -231,7 +231,7 @@ define 
@intrinsic_vfwcvt_f.x.v_nxv32f16_nxv32i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -275,7 +275,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv1f32_nxv1i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -319,7 +319,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv2f32_nxv2i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -363,7 +363,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv4f32_nxv4i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -407,7 +407,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv8f32_nxv8i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -451,7 +451,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv16f32_nxv16i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -495,7 +495,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv1f64_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -539,7 +539,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv2f64_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -583,7 +583,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv4f64_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -627,7 +627,7 @@ define @intrinsic_vfwcvt_f.x.v_nxv8f64_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu.ll @@ -11,7 +11,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -55,7 +55,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv2f16_nxv2i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -99,7 +99,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv4f16_nxv4i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -143,7 +143,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv8f16_nxv8i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -187,7 +187,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv16f16_nxv16i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -231,7 +231,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv32f16_nxv32i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -275,7 +275,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv1f32_nxv1i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -319,7 +319,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv2f32_nxv2i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -363,7 +363,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv4f32_nxv4i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -407,7 +407,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv8f32_nxv8i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8f32_nxv8i16: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -451,7 +451,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv16f32_nxv16i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -495,7 +495,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv1f64_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -539,7 +539,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv2f64_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -583,7 +583,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv4f64_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -627,7 +627,7 @@ define @intrinsic_vfwcvt_f.xu.v_nxv8f64_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f.ll @@ -11,7 +11,7 @@ define @intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfwcvt.rtz.x.f.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -55,7 +55,7 @@ define @intrinsic_vfwcvt_rtz.x.f.v_nxv2i32_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwcvt.rtz.x.f.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -99,7 +99,7 @@ define @intrinsic_vfwcvt_rtz.x.f.v_nxv4i32_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfwcvt.rtz.x.f.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -143,7 +143,7 @@ define @intrinsic_vfwcvt_rtz.x.f.v_nxv8i32_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; 
CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfwcvt.rtz.x.f.v v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -187,7 +187,7 @@ define @intrinsic_vfwcvt_rtz.x.f.v_nxv16i32_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfwcvt.rtz.x.f.v v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -231,7 +231,7 @@ define @intrinsic_vfwcvt_rtz.x.f.v_nxv1i64_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfwcvt.rtz.x.f.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -275,7 +275,7 @@ define @intrinsic_vfwcvt_rtz.x.f.v_nxv2i64_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.rtz.x.f.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -319,7 +319,7 @@ define @intrinsic_vfwcvt_rtz.x.f.v_nxv4i64_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfwcvt.rtz.x.f.v v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -363,7 +363,7 @@ define @intrinsic_vfwcvt_rtz.x.f.v_nxv8i64_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfwcvt.rtz.x.f.v v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f.ll @@ -11,7 +11,7 @@ define @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -55,7 +55,7 @@ define @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i32_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -99,7 +99,7 @@ define @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i32_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -143,7 +143,7 @@ define @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i32_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: 
vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -187,7 +187,7 @@ define @intrinsic_vfwcvt_rtz.xu.f.v_nxv16i32_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -231,7 +231,7 @@ define @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i64_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -275,7 +275,7 @@ define @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i64_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -319,7 +319,7 @@ define @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i64_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -363,7 +363,7 @@ define @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i64_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfwcvt.rtz.xu.f.v v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f.ll @@ -11,7 +11,7 @@ define @intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfwcvt.x.f.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -55,7 +55,7 @@ define @intrinsic_vfwcvt_x.f.v_nxv2i32_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwcvt.x.f.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -99,7 +99,7 @@ define @intrinsic_vfwcvt_x.f.v_nxv4i32_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfwcvt.x.f.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -143,7 +143,7 @@ define @intrinsic_vfwcvt_x.f.v_nxv8i32_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfwcvt.x.f.v v12, v8 ; 
CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -187,7 +187,7 @@ define @intrinsic_vfwcvt_x.f.v_nxv16i32_nxv16f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfwcvt.x.f.v v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -231,7 +231,7 @@ define @intrinsic_vfwcvt_x.f.v_nxv1i64_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfwcvt.x.f.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -275,7 +275,7 @@ define @intrinsic_vfwcvt_x.f.v_nxv2i64_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.x.f.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -319,7 +319,7 @@ define @intrinsic_vfwcvt_x.f.v_nxv4i64_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfwcvt.x.f.v v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -363,7 +363,7 @@ define @intrinsic_vfwcvt_x.f.v_nxv8i64_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfwcvt.x.f.v v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f.ll @@ -11,7 +11,7 @@ define @intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfwcvt.xu.f.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -55,7 +55,7 @@ define @intrinsic_vfwcvt_xu.f.v_nxv2i32_nxv2f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv2i32_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwcvt.xu.f.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -99,7 +99,7 @@ define @intrinsic_vfwcvt_xu.f.v_nxv4i32_nxv4f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv4i32_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfwcvt.xu.f.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -143,7 +143,7 @@ define @intrinsic_vfwcvt_xu.f.v_nxv8i32_nxv8f16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv8i32_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfwcvt.xu.f.v v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -187,7 +187,7 @@ define @intrinsic_vfwcvt_xu.f.v_nxv16i32_nxv16f16( %0, iXLen %1) nounwind { 
; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv16i32_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfwcvt.xu.f.v v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -231,7 +231,7 @@ define @intrinsic_vfwcvt_xu.f.v_nxv1i64_nxv1f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i64_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfwcvt.xu.f.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -275,7 +275,7 @@ define @intrinsic_vfwcvt_xu.f.v_nxv2i64_nxv2f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv2i64_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.xu.f.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -319,7 +319,7 @@ define @intrinsic_vfwcvt_xu.f.v_nxv4i64_nxv4f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv4i64_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfwcvt.xu.f.v v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -363,7 +363,7 @@ define @intrinsic_vfwcvt_xu.f.v_nxv8i64_nxv8f32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv8i64_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfwcvt.xu.f.v v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-sdnode.ll @@ -9,7 +9,7 @@ define @vfwmacc_vv_nxv1f32( %va, %vb, %vc) { ; CHECK-LABEL: vfwmacc_vv_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfwmacc.vv v8, v9, v10 ; CHECK-NEXT: ret %vd = fpext %vb to @@ -21,7 +21,7 @@ define @vfwmacc_vf_nxv1f32( %va, %vb, half %c) { ; CHECK-LABEL: vfwmacc_vf_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfwmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -35,7 +35,7 @@ define @vfwnmacc_vv_nxv1f32( %va, %vb, %vc) { ; CHECK-LABEL: vfwnmacc_vv_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfwnmacc.vv v8, v9, v10 ; CHECK-NEXT: ret %vd = fpext %vb to @@ -49,7 +49,7 @@ define @vfwnmacc_vf_nxv1f32( %va, %vb, half %c) { ; CHECK-LABEL: vfwnmacc_vf_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -65,7 +65,7 @@ define @vfwnmacc_fv_nxv1f32( %va, %vb, half %c) { ; CHECK-LABEL: vfwnmacc_fv_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -81,7 +81,7 @@ define @vfwmsac_vv_nxv1f32( %va, %vb, %vc) { ; CHECK-LABEL: 
vfwmsac_vv_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfwmsac.vv v8, v9, v10 ; CHECK-NEXT: ret %vd = fpext %vb to @@ -94,7 +94,7 @@ define @vfwmsac_vf_nxv1f32( %va, %vb, half %c) { ; CHECK-LABEL: vfwmsac_vf_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfwmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -109,7 +109,7 @@ define @vfwnmsac_vv_nxv1f32( %va, %vb, %vc) { ; CHECK-LABEL: vfwnmsac_vv_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfwnmsac.vv v8, v9, v10 ; CHECK-NEXT: ret %vd = fpext %vb to @@ -122,7 +122,7 @@ define @vfwnmsac_vf_nxv1f32( %va, %vb, half %c) { ; CHECK-LABEL: vfwnmsac_vf_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -137,7 +137,7 @@ define @vfwnmsac_fv_nxv1f32( %va, %vb, half %c) { ; CHECK-LABEL: vfwnmsac_fv_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -154,7 +154,7 @@ define @vfwmacc_vv_nxv2f32( %va, %vb, %vc) { ; CHECK-LABEL: vfwmacc_vv_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfwmacc.vv v8, v9, v10 ; CHECK-NEXT: ret %vd = fpext %vb to @@ -166,7 +166,7 @@ define @vfwmacc_vf_nxv2f32( %va, %vb, half %c) { ; CHECK-LABEL: vfwmacc_vf_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfwmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -180,7 +180,7 @@ define @vfwnmacc_vv_nxv2f32( %va, %vb, %vc) { ; CHECK-LABEL: vfwnmacc_vv_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfwnmacc.vv v8, v9, v10 ; CHECK-NEXT: ret %vd = fpext %vb to @@ -194,7 +194,7 @@ define @vfwnmacc_vf_nxv2f32( %va, %vb, half %c) { ; CHECK-LABEL: vfwnmacc_vf_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -210,7 +210,7 @@ define @vfwnmacc_fv_nxv2f32( %va, %vb, half %c) { ; CHECK-LABEL: vfwnmacc_fv_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -226,7 +226,7 @@ define @vfwmsac_vv_nxv2f32( %va, %vb, %vc) { ; CHECK-LABEL: vfwmsac_vv_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfwmsac.vv v8, v9, v10 ; CHECK-NEXT: ret %vd = fpext %vb to @@ -239,7 +239,7 @@ define @vfwmsac_vf_nxv2f32( %va, %vb, half %c) { ; CHECK-LABEL: vfwmsac_vf_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; 
CHECK-NEXT: vfwmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -254,7 +254,7 @@ define @vfwnmsac_vv_nxv2f32( %va, %vb, %vc) { ; CHECK-LABEL: vfwnmsac_vv_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfwnmsac.vv v8, v9, v10 ; CHECK-NEXT: ret %vd = fpext %vb to @@ -267,7 +267,7 @@ define @vfwnmsac_vf_nxv2f32( %va, %vb, half %c) { ; CHECK-LABEL: vfwnmsac_vf_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -282,7 +282,7 @@ define @vfwnmsac_fv_nxv2f32( %va, %vb, half %c) { ; CHECK-LABEL: vfwnmsac_fv_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -300,7 +300,7 @@ define @vfwmacc_vv_nxv4f32( %va, %vb, %vc) { ; CHECK-LABEL: vfwmacc_vv_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfwmacc.vv v8, v10, v11 ; CHECK-NEXT: ret %vd = fpext %vb to @@ -312,7 +312,7 @@ define @vfwmacc_vf_nxv4f32( %va, %vb, half %c) { ; CHECK-LABEL: vfwmacc_vf_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfwmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -326,7 +326,7 @@ define @vfwnmacc_vv_nxv4f32( %va, %vb, %vc) { ; CHECK-LABEL: vfwnmacc_vv_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfwnmacc.vv v8, v10, v11 ; CHECK-NEXT: ret %vd = fpext %vb to @@ -340,7 +340,7 @@ define @vfwnmacc_vf_nxv4f32( %va, %vb, half %c) { ; CHECK-LABEL: vfwnmacc_vf_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -356,7 +356,7 @@ define @vfwnmacc_fv_nxv4f32( %va, %vb, half %c) { ; CHECK-LABEL: vfwnmacc_fv_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -372,7 +372,7 @@ define @vfwmsac_vv_nxv4f32( %va, %vb, %vc) { ; CHECK-LABEL: vfwmsac_vv_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfwmsac.vv v8, v10, v11 ; CHECK-NEXT: ret %vd = fpext %vb to @@ -385,7 +385,7 @@ define @vfwmsac_vf_nxv4f32( %va, %vb, half %c) { ; CHECK-LABEL: vfwmsac_vf_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfwmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -400,7 +400,7 @@ define @vfwnmsac_vv_nxv4f32( %va, %vb, %vc) { ; CHECK-LABEL: vfwnmsac_vv_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfwnmsac.vv v8, v10, v11 ; CHECK-NEXT: ret %vd = fpext %vb to @@ -413,7 +413,7 @@ define @vfwnmsac_vf_nxv4f32( 
%va, %vb, half %c) { ; CHECK-LABEL: vfwnmsac_vf_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -428,7 +428,7 @@ define @vfwnmsac_fv_nxv4f32( %va, %vb, half %c) { ; CHECK-LABEL: vfwnmsac_fv_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -445,7 +445,7 @@ define @vfwmacc_vv_nxv8f32( %va, %vb, %vc) { ; CHECK-LABEL: vfwmacc_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfwmacc.vv v8, v12, v14 ; CHECK-NEXT: ret %vd = fpext %vb to @@ -457,7 +457,7 @@ define @vfwmacc_vf_nxv8f32( %va, %vb, half %c) { ; CHECK-LABEL: vfwmacc_vf_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfwmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -471,7 +471,7 @@ define @vfwnmacc_vv_nxv8f32( %va, %vb, %vc) { ; CHECK-LABEL: vfwnmacc_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfwnmacc.vv v8, v12, v14 ; CHECK-NEXT: ret %vd = fpext %vb to @@ -485,7 +485,7 @@ define @vfwnmacc_vf_nxv8f32( %va, %vb, half %c) { ; CHECK-LABEL: vfwnmacc_vf_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -501,7 +501,7 @@ define @vfwnmacc_fv_nxv8f32( %va, %vb, half %c) { ; CHECK-LABEL: vfwnmacc_fv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -517,7 +517,7 @@ define @vfwmsac_vv_nxv8f32( %va, %vb, %vc) { ; CHECK-LABEL: vfwmsac_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfwmsac.vv v8, v12, v14 ; CHECK-NEXT: ret %vd = fpext %vb to @@ -530,7 +530,7 @@ define @vfwmsac_vf_nxv8f32( %va, %vb, half %c) { ; CHECK-LABEL: vfwmsac_vf_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfwmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -545,7 +545,7 @@ define @vfwnmsac_vv_nxv8f32( %va, %vb, %vc) { ; CHECK-LABEL: vfwnmsac_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfwnmsac.vv v8, v12, v14 ; CHECK-NEXT: ret %vd = fpext %vb to @@ -558,7 +558,7 @@ define @vfwnmsac_vf_nxv8f32( %va, %vb, half %c) { ; CHECK-LABEL: vfwnmsac_vf_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -573,7 +573,7 @@ define @vfwnmsac_fv_nxv8f32( %va, %vb, half %c) { ; CHECK-LABEL: vfwnmsac_fv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, 
mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -590,7 +590,7 @@ define @vfwmacc_vv_nxv16f32( %va, %vb, %vc) { ; CHECK-LABEL: vfwmacc_vv_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfwmacc.vv v8, v16, v20 ; CHECK-NEXT: ret %vd = fpext %vb to @@ -602,7 +602,7 @@ define @vfwmacc_vf_nxv16f32( %va, %vb, half %c) { ; CHECK-LABEL: vfwmacc_vf_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfwmacc.vf v8, fa0, v16 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -616,7 +616,7 @@ define @vfwnmacc_vv_nxv16f32( %va, %vb, %vc) { ; CHECK-LABEL: vfwnmacc_vv_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfwnmacc.vv v8, v16, v20 ; CHECK-NEXT: ret %vd = fpext %vb to @@ -630,7 +630,7 @@ define @vfwnmacc_vf_nxv16f32( %va, %vb, half %c) { ; CHECK-LABEL: vfwnmacc_vf_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v16 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -646,7 +646,7 @@ define @vfwnmacc_fv_nxv16f32( %va, %vb, half %c) { ; CHECK-LABEL: vfwnmacc_fv_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v16 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -662,7 +662,7 @@ define @vfwmsac_vv_nxv16f32( %va, %vb, %vc) { ; CHECK-LABEL: vfwmsac_vv_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfwmsac.vv v8, v16, v20 ; CHECK-NEXT: ret %vd = fpext %vb to @@ -675,7 +675,7 @@ define @vfwmsac_vf_nxv16f32( %va, %vb, half %c) { ; CHECK-LABEL: vfwmsac_vf_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfwmsac.vf v8, fa0, v16 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -690,7 +690,7 @@ define @vfwnmsac_vv_nxv16f32( %va, %vb, %vc) { ; CHECK-LABEL: vfwnmsac_vv_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfwnmsac.vv v8, v16, v20 ; CHECK-NEXT: ret %vd = fpext %vb to @@ -703,7 +703,7 @@ define @vfwnmsac_vf_nxv16f32( %va, %vb, half %c) { ; CHECK-LABEL: vfwnmsac_vf_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v16 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -718,7 +718,7 @@ define @vfwnmsac_fv_nxv16f32( %va, %vb, half %c) { ; CHECK-LABEL: vfwnmsac_fv_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v16 ; CHECK-NEXT: ret %head = insertelement poison, half %c, i32 0 @@ -735,7 +735,7 @@ define @vfwmacc_vv_nxv1f64( %va, %vb, %vc) { ; CHECK-LABEL: vfwmacc_vv_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwmacc.vv v8, v9, v10 ; CHECK-NEXT: ret 
%vd = fpext %vb to @@ -747,7 +747,7 @@ define @vfwmacc_vf_nxv1f64( %va, %vb, float %c) { ; CHECK-LABEL: vfwmacc_vf_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -761,7 +761,7 @@ define @vfwnmacc_vv_nxv1f64( %va, %vb, %vc) { ; CHECK-LABEL: vfwnmacc_vv_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwnmacc.vv v8, v9, v10 ; CHECK-NEXT: ret %vd = fpext %vb to @@ -775,7 +775,7 @@ define @vfwnmacc_vf_nxv1f64( %va, %vb, float %c) { ; CHECK-LABEL: vfwnmacc_vf_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -791,7 +791,7 @@ define @vfwnmacc_fv_nxv1f64( %va, %vb, float %c) { ; CHECK-LABEL: vfwnmacc_fv_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -807,7 +807,7 @@ define @vfwmsac_vv_nxv1f64( %va, %vb, %vc) { ; CHECK-LABEL: vfwmsac_vv_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwmsac.vv v8, v9, v10 ; CHECK-NEXT: ret %vd = fpext %vb to @@ -820,7 +820,7 @@ define @vfwmsac_vf_nxv1f64( %va, %vb, float %c) { ; CHECK-LABEL: vfwmsac_vf_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -835,7 +835,7 @@ define @vfwnmsac_vv_nxv1f64( %va, %vb, %vc) { ; CHECK-LABEL: vfwnmsac_vv_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwnmsac.vv v8, v9, v10 ; CHECK-NEXT: ret %vd = fpext %vb to @@ -848,7 +848,7 @@ define @vfwnmsac_vf_nxv1f64( %va, %vb, float %c) { ; CHECK-LABEL: vfwnmsac_vf_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -863,7 +863,7 @@ define @vfwnmsac_fv_nxv1f64( %va, %vb, float %c) { ; CHECK-LABEL: vfwnmsac_fv_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -880,7 +880,7 @@ define @vfwmacc_vv_nxv2f64( %va, %vb, %vc) { ; CHECK-LABEL: vfwmacc_vv_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwmacc.vv v8, v10, v11 ; CHECK-NEXT: ret %vd = fpext %vb to @@ -892,7 +892,7 @@ define @vfwmacc_vf_nxv2f64( %va, %vb, float %c) { ; CHECK-LABEL: vfwmacc_vf_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -906,7 +906,7 @@ define @vfwnmacc_vv_nxv2f64( %va, %vb, %vc) { ; CHECK-LABEL: 
vfwnmacc_vv_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwnmacc.vv v8, v10, v11 ; CHECK-NEXT: ret %vd = fpext %vb to @@ -920,7 +920,7 @@ define @vfwnmacc_vf_nxv2f64( %va, %vb, float %c) { ; CHECK-LABEL: vfwnmacc_vf_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -936,7 +936,7 @@ define @vfwnmacc_fv_nxv2f64( %va, %vb, float %c) { ; CHECK-LABEL: vfwnmacc_fv_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -952,7 +952,7 @@ define @vfwmsac_vv_nxv2f64( %va, %vb, %vc) { ; CHECK-LABEL: vfwmsac_vv_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwmsac.vv v8, v10, v11 ; CHECK-NEXT: ret %vd = fpext %vb to @@ -965,7 +965,7 @@ define @vfwmsac_vf_nxv2f64( %va, %vb, float %c) { ; CHECK-LABEL: vfwmsac_vf_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -980,7 +980,7 @@ define @vfwnmsac_vv_nxv2f64( %va, %vb, %vc) { ; CHECK-LABEL: vfwnmsac_vv_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwnmsac.vv v8, v10, v11 ; CHECK-NEXT: ret %vd = fpext %vb to @@ -993,7 +993,7 @@ define @vfwnmsac_vf_nxv2f64( %va, %vb, float %c) { ; CHECK-LABEL: vfwnmsac_vf_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -1008,7 +1008,7 @@ define @vfwnmsac_fv_nxv2f64( %va, %vb, float %c) { ; CHECK-LABEL: vfwnmsac_fv_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -1026,7 +1026,7 @@ define @vfwmacc_vv_nxv4f64( %va, %vb, %vc) { ; CHECK-LABEL: vfwmacc_vv_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwmacc.vv v8, v12, v14 ; CHECK-NEXT: ret %vd = fpext %vb to @@ -1038,7 +1038,7 @@ define @vfwmacc_vf_nxv4f64( %va, %vb, float %c) { ; CHECK-LABEL: vfwmacc_vf_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -1052,7 +1052,7 @@ define @vfwnmacc_vv_nxv4f64( %va, %vb, %vc) { ; CHECK-LABEL: vfwnmacc_vv_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwnmacc.vv v8, v12, v14 ; CHECK-NEXT: ret %vd = fpext %vb to @@ -1066,7 +1066,7 @@ define @vfwnmacc_vf_nxv4f64( %va, %vb, float %c) { ; CHECK-LABEL: vfwnmacc_vf_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, 
m2, ta, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -1082,7 +1082,7 @@ define @vfwnmacc_fv_nxv4f64( %va, %vb, float %c) { ; CHECK-LABEL: vfwnmacc_fv_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -1098,7 +1098,7 @@ define @vfwmsac_vv_nxv4f64( %va, %vb, %vc) { ; CHECK-LABEL: vfwmsac_vv_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwmsac.vv v8, v12, v14 ; CHECK-NEXT: ret %vd = fpext %vb to @@ -1111,7 +1111,7 @@ define @vfwmsac_vf_nxv4f64( %va, %vb, float %c) { ; CHECK-LABEL: vfwmsac_vf_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -1126,7 +1126,7 @@ define @vfwnmsac_vv_nxv4f64( %va, %vb, %vc) { ; CHECK-LABEL: vfwnmsac_vv_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwnmsac.vv v8, v12, v14 ; CHECK-NEXT: ret %vd = fpext %vb to @@ -1139,7 +1139,7 @@ define @vfwnmsac_vf_nxv4f64( %va, %vb, float %c) { ; CHECK-LABEL: vfwnmsac_vf_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -1154,7 +1154,7 @@ define @vfwnmsac_fv_nxv4f64( %va, %vb, float %c) { ; CHECK-LABEL: vfwnmsac_fv_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -1171,7 +1171,7 @@ define @vfwmacc_vv_nxv8f64( %va, %vb, %vc) { ; CHECK-LABEL: vfwmacc_vv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwmacc.vv v8, v16, v20 ; CHECK-NEXT: ret %vd = fpext %vb to @@ -1183,7 +1183,7 @@ define @vfwmacc_vf_nxv8f64( %va, %vb, float %c) { ; CHECK-LABEL: vfwmacc_vf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwmacc.vf v8, fa0, v16 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -1197,7 +1197,7 @@ define @vfwnmacc_vv_nxv8f64( %va, %vb, %vc) { ; CHECK-LABEL: vfwnmacc_vv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwnmacc.vv v8, v16, v20 ; CHECK-NEXT: ret %vd = fpext %vb to @@ -1211,7 +1211,7 @@ define @vfwnmacc_vf_nxv8f64( %va, %vb, float %c) { ; CHECK-LABEL: vfwnmacc_vf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v16 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -1227,7 +1227,7 @@ define @vfwnmacc_fv_nxv8f64( %va, %vb, float %c) { ; CHECK-LABEL: vfwnmacc_fv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v16 ; CHECK-NEXT: ret %head = 
insertelement poison, float %c, i32 0 @@ -1243,7 +1243,7 @@ define @vfwmsac_vv_nxv8f64( %va, %vb, %vc) { ; CHECK-LABEL: vfwmsac_vv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwmsac.vv v8, v16, v20 ; CHECK-NEXT: ret %vd = fpext %vb to @@ -1256,7 +1256,7 @@ define @vfwmsac_vf_nxv8f64( %va, %vb, float %c) { ; CHECK-LABEL: vfwmsac_vf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwmsac.vf v8, fa0, v16 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -1271,7 +1271,7 @@ define @vfwnmsac_vv_nxv8f64( %va, %vb, %vc) { ; CHECK-LABEL: vfwnmsac_vv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwnmsac.vv v8, v16, v20 ; CHECK-NEXT: ret %vd = fpext %vb to @@ -1284,7 +1284,7 @@ define @vfwnmsac_vf_nxv8f64( %va, %vb, float %c) { ; CHECK-LABEL: vfwnmsac_vf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v16 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 @@ -1299,7 +1299,7 @@ define @vfwnmsac_fv_nxv8f64( %va, %vb, float %c) { ; CHECK-LABEL: vfwnmsac_fv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v16 ; CHECK-NEXT: ret %head = insertelement poison, float %c, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmacc.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmacc.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwmacc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmacc.ll @@ -13,7 +13,7 @@ define @intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfwmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -60,7 +60,7 @@ define @intrinsic_vfwmacc_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfwmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -107,7 +107,7 @@ define @intrinsic_vfwmacc_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfwmacc.vv v8, v10, v11 ; CHECK-NEXT: ret entry: @@ -154,7 +154,7 @@ define @intrinsic_vfwmacc_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfwmacc.vv v8, v12, v14 ; CHECK-NEXT: ret entry: @@ -201,7 +201,7 @@ define @intrinsic_vfwmacc_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfwmacc.vv v8, v16, v20 ; 
CHECK-NEXT: ret entry: @@ -248,7 +248,7 @@ define @intrinsic_vfwmacc_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfwmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -295,7 +295,7 @@ define @intrinsic_vfwmacc_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfwmacc.vv v8, v10, v11 ; CHECK-NEXT: ret entry: @@ -342,7 +342,7 @@ define @intrinsic_vfwmacc_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfwmacc.vv v8, v12, v14 ; CHECK-NEXT: ret entry: @@ -389,7 +389,7 @@ define @intrinsic_vfwmacc_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfwmacc.vv v8, v16, v20 ; CHECK-NEXT: ret entry: @@ -436,7 +436,7 @@ define @intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfwmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -483,7 +483,7 @@ define @intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfwmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -530,7 +530,7 @@ define @intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfwmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: @@ -577,7 +577,7 @@ define @intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfwmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: @@ -624,7 +624,7 @@ define @intrinsic_vfwmacc_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfwmacc.vf v8, fa0, v16 ; CHECK-NEXT: ret entry: @@ -671,7 +671,7 @@ define @intrinsic_vfwmacc_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfwmacc.vf v8, fa0, v9 ; 
CHECK-NEXT: ret entry: @@ -718,7 +718,7 @@ define @intrinsic_vfwmacc_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfwmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: @@ -765,7 +765,7 @@ define @intrinsic_vfwmacc_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfwmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: @@ -812,7 +812,7 @@ define @intrinsic_vfwmacc_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmacc_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfwmacc.vf v8, fa0, v16 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmsac.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmsac.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwmsac.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmsac.ll @@ -13,7 +13,7 @@ define @intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfwmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -60,7 +60,7 @@ define @intrinsic_vfwmsac_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfwmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -107,7 +107,7 @@ define @intrinsic_vfwmsac_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfwmsac.vv v8, v10, v11 ; CHECK-NEXT: ret entry: @@ -154,7 +154,7 @@ define @intrinsic_vfwmsac_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfwmsac.vv v8, v12, v14 ; CHECK-NEXT: ret entry: @@ -201,7 +201,7 @@ define @intrinsic_vfwmsac_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfwmsac.vv v8, v16, v20 ; CHECK-NEXT: ret entry: @@ -248,7 +248,7 @@ define @intrinsic_vfwmsac_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfwmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -295,7 +295,7 @@ define @intrinsic_vfwmsac_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfwmsac_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfwmsac.vv v8, v10, v11 ; CHECK-NEXT: ret entry: @@ -342,7 +342,7 @@ define @intrinsic_vfwmsac_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfwmsac.vv v8, v12, v14 ; CHECK-NEXT: ret entry: @@ -389,7 +389,7 @@ define @intrinsic_vfwmsac_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfwmsac.vv v8, v16, v20 ; CHECK-NEXT: ret entry: @@ -436,7 +436,7 @@ define @intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfwmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -483,7 +483,7 @@ define @intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfwmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -530,7 +530,7 @@ define @intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfwmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: @@ -577,7 +577,7 @@ define @intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfwmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: @@ -624,7 +624,7 @@ define @intrinsic_vfwmsac_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfwmsac.vf v8, fa0, v16 ; CHECK-NEXT: ret entry: @@ -671,7 +671,7 @@ define @intrinsic_vfwmsac_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfwmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -718,7 +718,7 @@ define @intrinsic_vfwmsac_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfwmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: @@ -765,7 +765,7 @@ define @intrinsic_vfwmsac_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfwmsac_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfwmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: @@ -812,7 +812,7 @@ define @intrinsic_vfwmsac_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfwmsac.vf v8, fa0, v16 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwmul-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmul-sdnode.ll @@ -7,7 +7,7 @@ define @vfwmul_vv_nxv1f64( %va, %vb) { ; CHECK-LABEL: vfwmul_vv_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwmul.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -20,7 +20,7 @@ define @vfwmul_vf_nxv1f64( %va, float %b) { ; CHECK-LABEL: vfwmul_vf_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwmul.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -35,7 +35,7 @@ define @vfwmul_vf_nxv1f64_2( %va, float %b) { ; CHECK-LABEL: vfwmul_vf_nxv1f64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwmul.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -50,7 +50,7 @@ define @vfwmul_vv_nxv2f64( %va, %vb) { ; CHECK-LABEL: vfwmul_vv_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwmul.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -63,7 +63,7 @@ define @vfwmul_vf_nxv2f64( %va, float %b) { ; CHECK-LABEL: vfwmul_vf_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwmul.vf v10, v8, fa0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -78,7 +78,7 @@ define @vfwmul_vf_nxv2f64_2( %va, float %b) { ; CHECK-LABEL: vfwmul_vf_nxv2f64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwmul.vf v10, v8, fa0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -93,7 +93,7 @@ define @vfwmul_vv_nxv4f64( %va, %vb) { ; CHECK-LABEL: vfwmul_vv_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwmul.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -106,7 +106,7 @@ define @vfwmul_vf_nxv4f64( %va, float %b) { ; CHECK-LABEL: vfwmul_vf_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwmul.vf v12, v8, fa0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -121,7 +121,7 @@ define @vfwmul_vf_nxv4f64_2( %va, float %b) { ; CHECK-LABEL: vfwmul_vf_nxv4f64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwmul.vf v12, v8, fa0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -136,7 +136,7 @@ define @vfwmul_vv_nxv8f64( %va, %vb) { ; CHECK-LABEL: 
vfwmul_vv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwmul.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -149,7 +149,7 @@ define @vfwmul_vf_nxv8f64( %va, float %b) { ; CHECK-LABEL: vfwmul_vf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwmul.vf v16, v8, fa0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -164,7 +164,7 @@ define @vfwmul_vf_nxv8f64_2( %va, float %b) { ; CHECK-LABEL: vfwmul_vf_nxv8f64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwmul.vf v16, v8, fa0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwmul.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmul.ll @@ -12,7 +12,7 @@ define @intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfwmul.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -60,7 +60,7 @@ define @intrinsic_vfwmul_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwmul.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -108,7 +108,7 @@ define @intrinsic_vfwmul_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfwmul.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -156,7 +156,7 @@ define @intrinsic_vfwmul_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfwmul.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -204,7 +204,7 @@ define @intrinsic_vfwmul_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfwmul.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -252,7 +252,7 @@ define @intrinsic_vfwmul_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfwmul.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -300,7 +300,7 @@ define @intrinsic_vfwmul_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfwmul.vv v10, 
v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -348,7 +348,7 @@ define @intrinsic_vfwmul_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfwmul.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -396,7 +396,7 @@ define @intrinsic_vfwmul_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfwmul.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -444,7 +444,7 @@ define @intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfwmul.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -492,7 +492,7 @@ define @intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwmul.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -540,7 +540,7 @@ define @intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfwmul.vf v10, v8, fa0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -588,7 +588,7 @@ define @intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfwmul.vf v12, v8, fa0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -636,7 +636,7 @@ define @intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfwmul.vf v16, v8, fa0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -684,7 +684,7 @@ define @intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfwmul.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -732,7 +732,7 @@ define @intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfwmul.vf v10, v8, fa0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -780,7 +780,7 @@ define @intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfwmul.vf v12, v8, fa0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -828,7 +828,7 @@ define @intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfwmul.vf v16, v8, fa0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc.ll @@ -13,7 +13,7 @@ define @intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfwnmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -60,7 +60,7 @@ define @intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfwnmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -107,7 +107,7 @@ define @intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfwnmacc.vv v8, v10, v11 ; CHECK-NEXT: ret entry: @@ -154,7 +154,7 @@ define @intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfwnmacc.vv v8, v12, v14 ; CHECK-NEXT: ret entry: @@ -201,7 +201,7 @@ define @intrinsic_vfwnmacc_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfwnmacc.vv v8, v16, v20 ; CHECK-NEXT: ret entry: @@ -248,7 +248,7 @@ define @intrinsic_vfwnmacc_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfwnmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -295,7 +295,7 @@ define @intrinsic_vfwnmacc_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfwnmacc.vv v8, v10, v11 ; CHECK-NEXT: ret entry: @@ -342,7 +342,7 @@ define @intrinsic_vfwnmacc_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli 
zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfwnmacc.vv v8, v12, v14 ; CHECK-NEXT: ret entry: @@ -389,7 +389,7 @@ define @intrinsic_vfwnmacc_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfwnmacc.vv v8, v16, v20 ; CHECK-NEXT: ret entry: @@ -436,7 +436,7 @@ define @intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -483,7 +483,7 @@ define @intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -530,7 +530,7 @@ define @intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: @@ -577,7 +577,7 @@ define @intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: @@ -624,7 +624,7 @@ define @intrinsic_vfwnmacc_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v16 ; CHECK-NEXT: ret entry: @@ -671,7 +671,7 @@ define @intrinsic_vfwnmacc_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -718,7 +718,7 @@ define @intrinsic_vfwnmacc_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: @@ -765,7 +765,7 @@ define @intrinsic_vfwnmacc_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: @@ -812,7 +812,7 @@ define @intrinsic_vfwnmacc_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; 
CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v16 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac.ll @@ -13,7 +13,7 @@ define @intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfwnmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -60,7 +60,7 @@ define @intrinsic_vfwnmsac_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfwnmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -107,7 +107,7 @@ define @intrinsic_vfwnmsac_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfwnmsac.vv v8, v10, v11 ; CHECK-NEXT: ret entry: @@ -154,7 +154,7 @@ define @intrinsic_vfwnmsac_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfwnmsac.vv v8, v12, v14 ; CHECK-NEXT: ret entry: @@ -201,7 +201,7 @@ define @intrinsic_vfwnmsac_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfwnmsac.vv v8, v16, v20 ; CHECK-NEXT: ret entry: @@ -248,7 +248,7 @@ define @intrinsic_vfwnmsac_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfwnmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -295,7 +295,7 @@ define @intrinsic_vfwnmsac_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfwnmsac.vv v8, v10, v11 ; CHECK-NEXT: ret entry: @@ -342,7 +342,7 @@ define @intrinsic_vfwnmsac_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfwnmsac.vv v8, v12, v14 ; CHECK-NEXT: ret entry: @@ -389,7 +389,7 @@ define @intrinsic_vfwnmsac_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfwnmsac.vv v8, v16, v20 ; CHECK-NEXT: ret entry: @@ -436,7 +436,7 @@ 
define @intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -483,7 +483,7 @@ define @intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -530,7 +530,7 @@ define @intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: @@ -577,7 +577,7 @@ define @intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: @@ -624,7 +624,7 @@ define @intrinsic_vfwnmsac_vf_nxv16f32_f16_nxv16f16( %0, half %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv16f32_f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v16 ; CHECK-NEXT: ret entry: @@ -671,7 +671,7 @@ define @intrinsic_vfwnmsac_vf_nxv1f64_f32_nxv1f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f64_f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9 ; CHECK-NEXT: ret entry: @@ -718,7 +718,7 @@ define @intrinsic_vfwnmsac_vf_nxv2f64_f32_nxv2f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f64_f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10 ; CHECK-NEXT: ret entry: @@ -765,7 +765,7 @@ define @intrinsic_vfwnmsac_vf_nxv4f64_f32_nxv4f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f64_f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12 ; CHECK-NEXT: ret entry: @@ -812,7 +812,7 @@ define @intrinsic_vfwnmsac_vf_nxv8f64_f32_nxv8f32( %0, float %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv8f64_f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v16 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredosum.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredosum.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwredosum.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwredosum.ll @@ -12,7 +12,7 @@ define @intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfwredosum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -35,7 +35,7 @@ define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv1f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfwredosum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define @intrinsic_vfwredosum_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfwredosum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -81,7 +81,7 @@ define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfwredosum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vfwredosum_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv4f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfwredosum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -127,7 +127,7 @@ define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv4f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfwredosum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -150,7 +150,7 @@ define @intrinsic_vfwredosum_vs_nxv2f32_nxv8f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv8f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfwredosum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -173,7 +173,7 @@ define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv8f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv8f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfwredosum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -196,7 +196,7 @@ define @intrinsic_vfwredosum_vs_nxv2f32_nxv16f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv16f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfwredosum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -219,7 +219,7 @@ define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv16f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv16f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; 
CHECK-NEXT: vfwredosum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +242,7 @@ define @intrinsic_vfwredosum_vs_nxv2f32_nxv32f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv32f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vfwredosum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -265,7 +265,7 @@ define @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vfwredosum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -288,7 +288,7 @@ define @intrinsic_vfwredosum_vs_nxv1f64_nxv1f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfwredosum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -311,7 +311,7 @@ define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv1f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfwredosum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -334,7 +334,7 @@ define @intrinsic_vfwredosum_vs_nxv1f64_nxv2f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv2f32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfwredosum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -357,7 +357,7 @@ define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv2f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv2f32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfwredosum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -380,7 +380,7 @@ define @intrinsic_vfwredosum_vs_nxv1f64_nxv4f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv4f32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfwredosum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -403,7 +403,7 @@ define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv4f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv4f32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfwredosum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -426,7 +426,7 @@ define @intrinsic_vfwredosum_vs_nxv1f64_nxv8f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv8f32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfwredosum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -449,7 +449,7 @@ define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv8f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; 
CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv8f32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfwredosum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -472,7 +472,7 @@ define @intrinsic_vfwredosum_vs_nxv1f64_nxv16f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv16f32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vfwredosum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -495,7 +495,7 @@ define @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv16f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv16f32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vfwredosum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredusum.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredusum.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwredusum.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwredusum.ll @@ -12,7 +12,7 @@ define @intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfwredusum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -35,7 +35,7 @@ define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv1f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv1f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfwredusum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define @intrinsic_vfwredusum_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfwredusum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -81,7 +81,7 @@ define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv2f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv2f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfwredusum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vfwredusum_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv4f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfwredusum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -127,7 +127,7 @@ define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv4f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv4f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfwredusum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -150,7 +150,7 @@ define @intrinsic_vfwredusum_vs_nxv2f32_nxv8f16_nxv2f32( %0, %1, %2, iXLen %3) 
nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv8f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfwredusum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -173,7 +173,7 @@ define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv8f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv8f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfwredusum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -196,7 +196,7 @@ define @intrinsic_vfwredusum_vs_nxv2f32_nxv16f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv16f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfwredusum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -219,7 +219,7 @@ define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv16f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv16f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfwredusum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +242,7 @@ define @intrinsic_vfwredusum_vs_nxv2f32_nxv32f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv32f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vfwredusum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -265,7 +265,7 @@ define @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv32f16_nxv2f32( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv32f16_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vfwredusum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -288,7 +288,7 @@ define @intrinsic_vfwredusum_vs_nxv1f64_nxv1f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfwredusum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -311,7 +311,7 @@ define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv1f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv1f32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfwredusum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -334,7 +334,7 @@ define @intrinsic_vfwredusum_vs_nxv1f64_nxv2f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv2f32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfwredusum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -357,7 +357,7 @@ define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv2f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv2f32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: 
vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfwredusum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -380,7 +380,7 @@ define @intrinsic_vfwredusum_vs_nxv1f64_nxv4f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv4f32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfwredusum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -403,7 +403,7 @@ define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv4f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv4f32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfwredusum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -426,7 +426,7 @@ define @intrinsic_vfwredusum_vs_nxv1f64_nxv8f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv8f32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfwredusum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -449,7 +449,7 @@ define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv8f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv8f32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfwredusum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -472,7 +472,7 @@ define @intrinsic_vfwredusum_vs_nxv1f64_nxv16f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv16f32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vfwredusum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -495,7 +495,7 @@ define @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv16f32_nxv1f64( %0, %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv16f32_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vfwredusum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwsub-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub-sdnode.ll @@ -7,7 +7,7 @@ define @vfwsub_vv_nxv1f64( %va, %vb) { ; CHECK-LABEL: vfwsub_vv_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwsub.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -20,7 +20,7 @@ define @vfwsub_vf_nxv1f64( %va, float %b) { ; CHECK-LABEL: vfwsub_vf_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwsub.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -35,7 +35,7 @@ define @vfwsub_vf_nxv1f64_2( %va, float %b) { ; CHECK-LABEL: vfwsub_vf_nxv1f64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwsub.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -50,7 +50,7 @@ define @vfwsub_wv_nxv1f64( %va, %vb) { ; CHECK-LABEL: vfwsub_wv_nxv1f64: ; CHECK: # 
%bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwsub.wv v8, v8, v9 ; CHECK-NEXT: ret %vc = fpext %vb to @@ -61,7 +61,7 @@ define @vfwsub_wf_nxv1f64( %va, float %b) { ; CHECK-LABEL: vfwsub_wf_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwsub.wf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -74,7 +74,7 @@ define @vfwsub_wf_nxv1f64_2( %va, float %b) { ; CHECK-LABEL: vfwsub_wf_nxv1f64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwsub.wf v8, v8, fa0 ; CHECK-NEXT: ret %fpext = fpext float %b to double @@ -87,7 +87,7 @@ define @vfwsub_vv_nxv2f64( %va, %vb) { ; CHECK-LABEL: vfwsub_vv_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwsub.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -100,7 +100,7 @@ define @vfwsub_vf_nxv2f64( %va, float %b) { ; CHECK-LABEL: vfwsub_vf_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwsub.vf v10, v8, fa0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -115,7 +115,7 @@ define @vfwsub_vf_nxv2f64_2( %va, float %b) { ; CHECK-LABEL: vfwsub_vf_nxv2f64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwsub.vf v10, v8, fa0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -130,7 +130,7 @@ define @vfwsub_wv_nxv2f64( %va, %vb) { ; CHECK-LABEL: vfwsub_wv_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwsub.wv v8, v8, v10 ; CHECK-NEXT: ret %vc = fpext %vb to @@ -141,7 +141,7 @@ define @vfwsub_wf_nxv2f64( %va, float %b) { ; CHECK-LABEL: vfwsub_wf_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwsub.wf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -154,7 +154,7 @@ define @vfwsub_wf_nxv2f64_2( %va, float %b) { ; CHECK-LABEL: vfwsub_wf_nxv2f64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwsub.wf v8, v8, fa0 ; CHECK-NEXT: ret %fpext = fpext float %b to double @@ -167,7 +167,7 @@ define @vfwsub_vv_nxv4f64( %va, %vb) { ; CHECK-LABEL: vfwsub_vv_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwsub.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -180,7 +180,7 @@ define @vfwsub_vf_nxv4f64( %va, float %b) { ; CHECK-LABEL: vfwsub_vf_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwsub.vf v12, v8, fa0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -195,7 +195,7 @@ define @vfwsub_vf_nxv4f64_2( %va, float %b) { ; CHECK-LABEL: vfwsub_vf_nxv4f64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwsub.vf v12, v8, fa0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -210,7 +210,7 @@ define @vfwsub_wv_nxv4f64( %va, %vb) { ; 
CHECK-LABEL: vfwsub_wv_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwsub.wv v8, v8, v12 ; CHECK-NEXT: ret %vc = fpext %vb to @@ -221,7 +221,7 @@ define @vfwsub_wf_nxv4f64( %va, float %b) { ; CHECK-LABEL: vfwsub_wf_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwsub.wf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -234,7 +234,7 @@ define @vfwsub_wf_nxv4f64_2( %va, float %b) { ; CHECK-LABEL: vfwsub_wf_nxv4f64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwsub.wf v8, v8, fa0 ; CHECK-NEXT: ret %fpext = fpext float %b to double @@ -247,7 +247,7 @@ define @vfwsub_vv_nxv8f64( %va, %vb) { ; CHECK-LABEL: vfwsub_vv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwsub.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -260,7 +260,7 @@ define @vfwsub_vf_nxv8f64( %va, float %b) { ; CHECK-LABEL: vfwsub_vf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwsub.vf v16, v8, fa0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -275,7 +275,7 @@ define @vfwsub_vf_nxv8f64_2( %va, float %b) { ; CHECK-LABEL: vfwsub_vf_nxv8f64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwsub.vf v16, v8, fa0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -290,7 +290,7 @@ define @vfwsub_wv_nxv8f64( %va, %vb) { ; CHECK-LABEL: vfwsub_wv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwsub.wv v8, v8, v16 ; CHECK-NEXT: ret %vc = fpext %vb to @@ -301,7 +301,7 @@ define @vfwsub_wf_nxv8f64( %va, float %b) { ; CHECK-LABEL: vfwsub_wf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwsub.wf v8, v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -314,7 +314,7 @@ define @vfwsub_wf_nxv8f64_2( %va, float %b) { ; CHECK-LABEL: vfwsub_wf_nxv8f64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwsub.wf v8, v8, fa0 ; CHECK-NEXT: ret %fpext = fpext float %b to double diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.ll @@ -12,7 +12,7 @@ define @intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfwsub.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -60,7 +60,7 @@ define @intrinsic_vfwsub_vv_nxv2f32_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f32_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwsub.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; 
CHECK-NEXT: ret @@ -108,7 +108,7 @@ define @intrinsic_vfwsub_vv_nxv4f32_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f32_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfwsub.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -156,7 +156,7 @@ define @intrinsic_vfwsub_vv_nxv8f32_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f32_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfwsub.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -204,7 +204,7 @@ define @intrinsic_vfwsub_vv_nxv16f32_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv16f32_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfwsub.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -252,7 +252,7 @@ define @intrinsic_vfwsub_vv_nxv1f64_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f64_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfwsub.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -300,7 +300,7 @@ define @intrinsic_vfwsub_vv_nxv2f64_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f64_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfwsub.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -348,7 +348,7 @@ define @intrinsic_vfwsub_vv_nxv4f64_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f64_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfwsub.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -396,7 +396,7 @@ define @intrinsic_vfwsub_vv_nxv8f64_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f64_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfwsub.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -444,7 +444,7 @@ define @intrinsic_vfwsub_vf_nxv1f32_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f32_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfwsub.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -492,7 +492,7 @@ define @intrinsic_vfwsub_vf_nxv2f32_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f32_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwsub.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -540,7 +540,7 @@ define @intrinsic_vfwsub_vf_nxv4f32_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f32_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfwsub.vf v10, v8, fa0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -588,7 +588,7 @@ define @intrinsic_vfwsub_vf_nxv8f32_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f32_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfwsub.vf v12, v8, fa0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -636,7 +636,7 @@ define @intrinsic_vfwsub_vf_nxv16f32_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv16f32_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfwsub.vf v16, v8, fa0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -684,7 +684,7 @@ define @intrinsic_vfwsub_vf_nxv1f64_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f64_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfwsub.vf v9, v8, fa0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -732,7 +732,7 @@ define @intrinsic_vfwsub_vf_nxv2f64_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f64_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfwsub.vf v10, v8, fa0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -780,7 +780,7 @@ define @intrinsic_vfwsub_vf_nxv4f64_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f64_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfwsub.vf v12, v8, fa0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -828,7 +828,7 @@ define @intrinsic_vfwsub_vf_nxv8f64_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f64_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfwsub.vf v16, v8, fa0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll @@ -12,7 +12,7 @@ define @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfwsub.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -59,7 +59,7 @@ define @intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwsub.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -106,7 +106,7 @@ define @intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: 
vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfwsub.wv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -153,7 +153,7 @@ define @intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfwsub.wv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -200,7 +200,7 @@ define @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfwsub.wv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -248,7 +248,7 @@ define @intrinsic_vfwsub.w_wv_nxv1f64_nxv1f64_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f64_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfwsub.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -295,7 +295,7 @@ define @intrinsic_vfwsub.w_wv_nxv2f64_nxv2f64_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f64_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfwsub.wv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -342,7 +342,7 @@ define @intrinsic_vfwsub.w_wv_nxv4f64_nxv4f64_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f64_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfwsub.wv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -389,7 +389,7 @@ define @intrinsic_vfwsub.w_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f64_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfwsub.wv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -437,7 +437,7 @@ define @intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfwsub.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -484,7 +484,7 @@ define @intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwsub.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -531,7 +531,7 @@ define @intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfwsub.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -578,7 +578,7 @@ define @intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; 
CHECK-NEXT: vfwsub.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -625,7 +625,7 @@ define @intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vfwsub.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -672,7 +672,7 @@ define @intrinsic_vfwsub.w_wf_nxv1f64_nxv1f64_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f64_nxv1f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfwsub.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -719,7 +719,7 @@ define @intrinsic_vfwsub.w_wf_nxv2f64_nxv2f64_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f64_nxv2f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfwsub.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -766,7 +766,7 @@ define @intrinsic_vfwsub.w_wf_nxv4f64_nxv4f64_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f64_nxv4f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfwsub.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -813,7 +813,7 @@ define @intrinsic_vfwsub.w_wf_nxv8f64_nxv8f64_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f64_nxv8f64_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfwsub.wf v8, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1160,7 +1160,7 @@ define @intrinsic_vfwsub.w_wv_untie_nxv1f32_nxv1f32_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv1f32_nxv1f32_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vfwsub.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1177,7 +1177,7 @@ define @intrinsic_vfwsub.w_wv_untie_nxv2f32_nxv2f32_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv2f32_nxv2f32_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwsub.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1194,7 +1194,7 @@ define @intrinsic_vfwsub.w_wv_untie_nxv4f32_nxv4f32_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv4f32_nxv4f32_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vfwsub.wv v12, v10, v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -1211,7 +1211,7 @@ define @intrinsic_vfwsub.w_wv_untie_nxv8f32_nxv8f32_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv8f32_nxv8f32_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vfwsub.wv v16, v12, v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -1228,7 +1228,7 @@ define @intrinsic_vfwsub.w_wv_untie_nxv1f64_nxv1f64_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vfwsub.w_wv_untie_nxv1f64_nxv1f64_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vfwsub.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1245,7 +1245,7 @@ define @intrinsic_vfwsub.w_wv_untie_nxv2f64_nxv2f64_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv2f64_nxv2f64_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfwsub.wv v12, v10, v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -1262,7 +1262,7 @@ define @intrinsic_vfwsub.w_wv_untie_nxv4f64_nxv4f64_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv4f64_nxv4f64_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vfwsub.wv v16, v12, v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -1279,7 +1279,7 @@ define @intrinsic_vfwsub.w_wv_untie_nxv8f64_nxv8f64_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv8f64_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfwsub.wv v24, v16, v8 ; CHECK-NEXT: vmv8r.v v8, v24 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vid.ll b/llvm/test/CodeGen/RISCV/rvv/vid.ll --- a/llvm/test/CodeGen/RISCV/rvv/vid.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vid.ll @@ -10,7 +10,7 @@ define @intrinsic_vid_v_nxv1i8(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret entry: @@ -48,7 +48,7 @@ define @intrinsic_vid_v_nxv2i8(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret entry: @@ -86,7 +86,7 @@ define @intrinsic_vid_v_nxv4i8(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret entry: @@ -124,7 +124,7 @@ define @intrinsic_vid_v_nxv8i8(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret entry: @@ -162,7 +162,7 @@ define @intrinsic_vid_v_nxv16i8(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret entry: @@ -200,7 +200,7 @@ define @intrinsic_vid_v_nxv32i8(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret entry: @@ -238,7 +238,7 @@ define @intrinsic_vid_v_nxv1i16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: 
vid.v v8 ; CHECK-NEXT: ret entry: @@ -276,7 +276,7 @@ define @intrinsic_vid_v_nxv2i16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret entry: @@ -314,7 +314,7 @@ define @intrinsic_vid_v_nxv4i16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret entry: @@ -352,7 +352,7 @@ define @intrinsic_vid_v_nxv8i16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret entry: @@ -390,7 +390,7 @@ define @intrinsic_vid_v_nxv16i16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret entry: @@ -428,7 +428,7 @@ define @intrinsic_vid_v_nxv32i16(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret entry: @@ -466,7 +466,7 @@ define @intrinsic_vid_v_nxv1i32(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret entry: @@ -504,7 +504,7 @@ define @intrinsic_vid_v_nxv2i32(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret entry: @@ -542,7 +542,7 @@ define @intrinsic_vid_v_nxv4i32(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret entry: @@ -580,7 +580,7 @@ define @intrinsic_vid_v_nxv8i32(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret entry: @@ -618,7 +618,7 @@ define @intrinsic_vid_v_nxv16i32(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret entry: @@ -656,7 +656,7 @@ define @intrinsic_vid_v_nxv1i64(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret entry: @@ -694,7 +694,7 @@ define @intrinsic_vid_v_nxv2i64(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret entry: @@ -732,7 +732,7 @@ define @intrinsic_vid_v_nxv4i64(iXLen %0) nounwind { ; 
CHECK-LABEL: intrinsic_vid_v_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret entry: @@ -770,7 +770,7 @@ define @intrinsic_vid_v_nxv8i64(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vid_v_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/viota.ll b/llvm/test/CodeGen/RISCV/rvv/viota.ll --- a/llvm/test/CodeGen/RISCV/rvv/viota.ll +++ b/llvm/test/CodeGen/RISCV/rvv/viota.ll @@ -11,7 +11,7 @@ define @intrinsic_viota_m_nxv1i8_nxv1i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv1i8_nxv1i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: viota.m v8, v0 ; CHECK-NEXT: ret entry: @@ -53,7 +53,7 @@ define @intrinsic_viota_m_nxv2i8_nxv2i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv2i8_nxv2i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: viota.m v8, v0 ; CHECK-NEXT: ret entry: @@ -95,7 +95,7 @@ define @intrinsic_viota_m_nxv4i8_nxv4i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv4i8_nxv4i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: viota.m v8, v0 ; CHECK-NEXT: ret entry: @@ -137,7 +137,7 @@ define @intrinsic_viota_m_nxv8i8_nxv8i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv8i8_nxv8i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: viota.m v8, v0 ; CHECK-NEXT: ret entry: @@ -179,7 +179,7 @@ define @intrinsic_viota_m_nxv16i8_nxv16i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv16i8_nxv16i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: viota.m v8, v0 ; CHECK-NEXT: ret entry: @@ -221,7 +221,7 @@ define @intrinsic_viota_m_nxv32i8_nxv32i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv32i8_nxv32i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: viota.m v8, v0 ; CHECK-NEXT: ret entry: @@ -263,7 +263,7 @@ define @intrinsic_viota_m_nxv64i8_nxv64i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv64i8_nxv64i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: viota.m v8, v0 ; CHECK-NEXT: ret entry: @@ -305,7 +305,7 @@ define @intrinsic_viota_m_nxv1i16_nxv1i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv1i16_nxv1i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: viota.m v8, v0 ; CHECK-NEXT: ret entry: @@ -347,7 +347,7 @@ define @intrinsic_viota_m_nxv2i16_nxv2i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv2i16_nxv2i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: viota.m v8, v0 ; CHECK-NEXT: ret entry: @@ -389,7 +389,7 @@ define 
@intrinsic_viota_m_nxv4i16_nxv4i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv4i16_nxv4i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: viota.m v8, v0 ; CHECK-NEXT: ret entry: @@ -431,7 +431,7 @@ define @intrinsic_viota_m_nxv8i16_nxv8i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv8i16_nxv8i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: viota.m v8, v0 ; CHECK-NEXT: ret entry: @@ -473,7 +473,7 @@ define @intrinsic_viota_m_nxv16i16_nxv16i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv16i16_nxv16i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: viota.m v8, v0 ; CHECK-NEXT: ret entry: @@ -515,7 +515,7 @@ define @intrinsic_viota_m_nxv32i16_nxv32i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv32i16_nxv32i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: viota.m v8, v0 ; CHECK-NEXT: ret entry: @@ -557,7 +557,7 @@ define @intrinsic_viota_m_nxv1i32_nxv1i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv1i32_nxv1i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: viota.m v8, v0 ; CHECK-NEXT: ret entry: @@ -599,7 +599,7 @@ define @intrinsic_viota_m_nxv2i32_nxv2i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv2i32_nxv2i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: viota.m v8, v0 ; CHECK-NEXT: ret entry: @@ -641,7 +641,7 @@ define @intrinsic_viota_m_nxv4i32_nxv4i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv4i32_nxv4i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: viota.m v8, v0 ; CHECK-NEXT: ret entry: @@ -683,7 +683,7 @@ define @intrinsic_viota_m_nxv8i32_nxv8i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv8i32_nxv8i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: viota.m v8, v0 ; CHECK-NEXT: ret entry: @@ -725,7 +725,7 @@ define @intrinsic_viota_m_nxv16i32_nxv16i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv16i32_nxv16i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: viota.m v8, v0 ; CHECK-NEXT: ret entry: @@ -767,7 +767,7 @@ define @intrinsic_viota_m_nxv1i64_nxv1i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv1i64_nxv1i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: viota.m v8, v0 ; CHECK-NEXT: ret entry: @@ -809,7 +809,7 @@ define @intrinsic_viota_m_nxv2i64_nxv2i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv2i64_nxv2i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: viota.m v8, v0 ; CHECK-NEXT: ret entry: @@ -851,7 +851,7 @@ define @intrinsic_viota_m_nxv4i64_nxv4i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: 
intrinsic_viota_m_nxv4i64_nxv4i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: viota.m v8, v0 ; CHECK-NEXT: ret entry: @@ -893,7 +893,7 @@ define @intrinsic_viota_m_nxv8i64_nxv8i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_viota_m_nxv8i64_nxv8i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: viota.m v8, v0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vitofp-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vitofp-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vitofp-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vitofp-sdnode.ll @@ -7,7 +7,7 @@ define @vsitofp_nxv1i1_nxv1f16( %va) { ; CHECK-LABEL: vsitofp_nxv1i1_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 @@ -19,7 +19,7 @@ define @vuitofp_nxv1i1_nxv1f16( %va) { ; CHECK-LABEL: vuitofp_nxv1i1_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 @@ -31,7 +31,7 @@ define @vsitofp_nxv1i1_nxv1f32( %va) { ; CHECK-LABEL: vsitofp_nxv1i1_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 @@ -43,7 +43,7 @@ define @vuitofp_nxv1i1_nxv1f32( %va) { ; CHECK-LABEL: vuitofp_nxv1i1_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 @@ -55,7 +55,7 @@ define @vsitofp_nxv1i1_nxv1f64( %va) { ; CHECK-LABEL: vsitofp_nxv1i1_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 @@ -67,7 +67,7 @@ define @vuitofp_nxv1i1_nxv1f64( %va) { ; CHECK-LABEL: vuitofp_nxv1i1_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 @@ -79,7 +79,7 @@ define @vsitofp_nxv2i1_nxv2f16( %va) { ; CHECK-LABEL: vsitofp_nxv2i1_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 @@ -91,7 +91,7 @@ define @vuitofp_nxv2i1_nxv2f16( %va) { ; CHECK-LABEL: vuitofp_nxv2i1_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 @@ -103,7 +103,7 @@ define @vsitofp_nxv2i1_nxv2f32( %va) { ; CHECK-LABEL: vsitofp_nxv2i1_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: 
vfcvt.f.x.v v8, v8 @@ -115,7 +115,7 @@ define @vuitofp_nxv2i1_nxv2f32( %va) { ; CHECK-LABEL: vuitofp_nxv2i1_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 @@ -127,7 +127,7 @@ define @vsitofp_nxv2i1_nxv2f64( %va) { ; CHECK-LABEL: vsitofp_nxv2i1_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 @@ -139,7 +139,7 @@ define @vuitofp_nxv2i1_nxv2f64( %va) { ; CHECK-LABEL: vuitofp_nxv2i1_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 @@ -151,7 +151,7 @@ define @vsitofp_nxv4i1_nxv4f16( %va) { ; CHECK-LABEL: vsitofp_nxv4i1_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 @@ -163,7 +163,7 @@ define @vuitofp_nxv4i1_nxv4f16( %va) { ; CHECK-LABEL: vuitofp_nxv4i1_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 @@ -175,7 +175,7 @@ define @vsitofp_nxv4i1_nxv4f32( %va) { ; CHECK-LABEL: vsitofp_nxv4i1_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 @@ -187,7 +187,7 @@ define @vuitofp_nxv4i1_nxv4f32( %va) { ; CHECK-LABEL: vuitofp_nxv4i1_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 @@ -199,7 +199,7 @@ define @vsitofp_nxv4i1_nxv4f64( %va) { ; CHECK-LABEL: vsitofp_nxv4i1_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 @@ -211,7 +211,7 @@ define @vuitofp_nxv4i1_nxv4f64( %va) { ; CHECK-LABEL: vuitofp_nxv4i1_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 @@ -223,7 +223,7 @@ define @vsitofp_nxv8i1_nxv8f16( %va) { ; CHECK-LABEL: vsitofp_nxv8i1_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 @@ -235,7 +235,7 @@ define @vuitofp_nxv8i1_nxv8f16( %va) { ; CHECK-LABEL: vuitofp_nxv8i1_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 @@ -247,7 +247,7 @@ define @vsitofp_nxv8i1_nxv8f32( %va) { ; 
CHECK-LABEL: vsitofp_nxv8i1_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 @@ -259,7 +259,7 @@ define @vuitofp_nxv8i1_nxv8f32( %va) { ; CHECK-LABEL: vuitofp_nxv8i1_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 @@ -271,7 +271,7 @@ define @vsitofp_nxv8i1_nxv8f64( %va) { ; CHECK-LABEL: vsitofp_nxv8i1_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 @@ -283,7 +283,7 @@ define @vuitofp_nxv8i1_nxv8f64( %va) { ; CHECK-LABEL: vuitofp_nxv8i1_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 @@ -295,7 +295,7 @@ define @vsitofp_nxv16i1_nxv16f16( %va) { ; CHECK-LABEL: vsitofp_nxv16i1_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 @@ -307,7 +307,7 @@ define @vuitofp_nxv16i1_nxv16f16( %va) { ; CHECK-LABEL: vuitofp_nxv16i1_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 @@ -319,7 +319,7 @@ define @vsitofp_nxv16i1_nxv16f32( %va) { ; CHECK-LABEL: vsitofp_nxv16i1_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 @@ -331,7 +331,7 @@ define @vuitofp_nxv16i1_nxv16f32( %va) { ; CHECK-LABEL: vuitofp_nxv16i1_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 @@ -343,7 +343,7 @@ define @vsitofp_nxv32i1_nxv32f16( %va) { ; CHECK-LABEL: vsitofp_nxv32i1_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 @@ -355,7 +355,7 @@ define @vuitofp_nxv32i1_nxv32f16( %va) { ; CHECK-LABEL: vuitofp_nxv32i1_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 @@ -367,7 +367,7 @@ define @vsitofp_nxv1i8_nxv1f16( %va) { ; CHECK-LABEL: vsitofp_nxv1i8_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -378,7 +378,7 @@ define @vsitofp_nxv1i7_nxv1f16( %va) { ; CHECK-LABEL: vsitofp_nxv1i7_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, 
zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v8 ; CHECK-NEXT: vsra.vi v9, v8, 1 ; CHECK-NEXT: vfwcvt.f.x.v v8, v9 @@ -391,7 +391,7 @@ ; CHECK-LABEL: vuitofp_nxv1i7_nxv1f16: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 127 -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vand.vx v9, v8, a0 ; CHECK-NEXT: vfwcvt.f.xu.v v8, v9 ; CHECK-NEXT: ret @@ -402,7 +402,7 @@ define @vuitofp_nxv1i8_nxv1f16( %va) { ; CHECK-LABEL: vuitofp_nxv1i8_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -413,7 +413,7 @@ define @vsitofp_nxv1i8_nxv1f32( %va) { ; CHECK-LABEL: vsitofp_nxv1i8_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vsext.vf4 v9, v8 ; CHECK-NEXT: vfcvt.f.x.v v8, v9 ; CHECK-NEXT: ret @@ -424,7 +424,7 @@ define @vuitofp_nxv1i8_nxv1f32( %va) { ; CHECK-LABEL: vuitofp_nxv1i8_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vzext.vf4 v9, v8 ; CHECK-NEXT: vfcvt.f.xu.v v8, v9 ; CHECK-NEXT: ret @@ -435,7 +435,7 @@ define @vsitofp_nxv1i8_nxv1f64( %va) { ; CHECK-LABEL: vsitofp_nxv1i8_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vsext.vf8 v9, v8 ; CHECK-NEXT: vfcvt.f.x.v v8, v9 ; CHECK-NEXT: ret @@ -446,7 +446,7 @@ define @vuitofp_nxv1i8_nxv1f64( %va) { ; CHECK-LABEL: vuitofp_nxv1i8_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vzext.vf8 v9, v8 ; CHECK-NEXT: vfcvt.f.xu.v v8, v9 ; CHECK-NEXT: ret @@ -457,7 +457,7 @@ define @vsitofp_nxv2i8_nxv2f16( %va) { ; CHECK-LABEL: vsitofp_nxv2i8_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -468,7 +468,7 @@ define @vuitofp_nxv2i8_nxv2f16( %va) { ; CHECK-LABEL: vuitofp_nxv2i8_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -479,7 +479,7 @@ define @vsitofp_nxv2i8_nxv2f32( %va) { ; CHECK-LABEL: vsitofp_nxv2i8_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vsext.vf4 v9, v8 ; CHECK-NEXT: vfcvt.f.x.v v8, v9 ; CHECK-NEXT: ret @@ -490,7 +490,7 @@ define @vuitofp_nxv2i8_nxv2f32( %va) { ; CHECK-LABEL: vuitofp_nxv2i8_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vzext.vf4 v9, v8 ; CHECK-NEXT: vfcvt.f.xu.v v8, v9 ; CHECK-NEXT: ret @@ -501,7 +501,7 @@ define @vsitofp_nxv2i8_nxv2f64( %va) { ; CHECK-LABEL: vsitofp_nxv2i8_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vsext.vf8 v10, v8 ; CHECK-NEXT: vfcvt.f.x.v v8, v10 ; CHECK-NEXT: ret @@ -512,7 +512,7 @@ define @vuitofp_nxv2i8_nxv2f64( %va) { ; CHECK-LABEL: vuitofp_nxv2i8_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: 
vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vzext.vf8 v10, v8 ; CHECK-NEXT: vfcvt.f.xu.v v8, v10 ; CHECK-NEXT: ret @@ -523,7 +523,7 @@ define @vsitofp_nxv4i8_nxv4f16( %va) { ; CHECK-LABEL: vsitofp_nxv4i8_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -534,7 +534,7 @@ define @vuitofp_nxv4i8_nxv4f16( %va) { ; CHECK-LABEL: vuitofp_nxv4i8_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -545,7 +545,7 @@ define @vsitofp_nxv4i8_nxv4f32( %va) { ; CHECK-LABEL: vsitofp_nxv4i8_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vsext.vf4 v10, v8 ; CHECK-NEXT: vfcvt.f.x.v v8, v10 ; CHECK-NEXT: ret @@ -556,7 +556,7 @@ define @vuitofp_nxv4i8_nxv4f32( %va) { ; CHECK-LABEL: vuitofp_nxv4i8_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vzext.vf4 v10, v8 ; CHECK-NEXT: vfcvt.f.xu.v v8, v10 ; CHECK-NEXT: ret @@ -567,7 +567,7 @@ define @vsitofp_nxv4i8_nxv4f64( %va) { ; CHECK-LABEL: vsitofp_nxv4i8_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vsext.vf8 v12, v8 ; CHECK-NEXT: vfcvt.f.x.v v8, v12 ; CHECK-NEXT: ret @@ -578,7 +578,7 @@ define @vuitofp_nxv4i8_nxv4f64( %va) { ; CHECK-LABEL: vuitofp_nxv4i8_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vzext.vf8 v12, v8 ; CHECK-NEXT: vfcvt.f.xu.v v8, v12 ; CHECK-NEXT: ret @@ -589,7 +589,7 @@ define @vsitofp_nxv8i8_nxv8f16( %va) { ; CHECK-LABEL: vsitofp_nxv8i8_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -600,7 +600,7 @@ define @vuitofp_nxv8i8_nxv8f16( %va) { ; CHECK-LABEL: vuitofp_nxv8i8_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -611,7 +611,7 @@ define @vsitofp_nxv8i8_nxv8f32( %va) { ; CHECK-LABEL: vsitofp_nxv8i8_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vsext.vf4 v12, v8 ; CHECK-NEXT: vfcvt.f.x.v v8, v12 ; CHECK-NEXT: ret @@ -622,7 +622,7 @@ define @vuitofp_nxv8i8_nxv8f32( %va) { ; CHECK-LABEL: vuitofp_nxv8i8_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vzext.vf4 v12, v8 ; CHECK-NEXT: vfcvt.f.xu.v v8, v12 ; CHECK-NEXT: ret @@ -633,7 +633,7 @@ define @vsitofp_nxv8i8_nxv8f64( %va) { ; CHECK-LABEL: vsitofp_nxv8i8_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vsext.vf8 v16, v8 ; CHECK-NEXT: vfcvt.f.x.v v8, v16 ; CHECK-NEXT: ret @@ -644,7 +644,7 @@ define @vuitofp_nxv8i8_nxv8f64( %va) { ; CHECK-LABEL: vuitofp_nxv8i8_nxv8f64: ; CHECK: # %bb.0: 
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vzext.vf8 v16, v8 ; CHECK-NEXT: vfcvt.f.xu.v v8, v16 ; CHECK-NEXT: ret @@ -655,7 +655,7 @@ define @vsitofp_nxv16i8_nxv16f16( %va) { ; CHECK-LABEL: vsitofp_nxv16i8_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -666,7 +666,7 @@ define @vuitofp_nxv16i8_nxv16f16( %va) { ; CHECK-LABEL: vuitofp_nxv16i8_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -677,7 +677,7 @@ define @vsitofp_nxv16i8_nxv16f32( %va) { ; CHECK-LABEL: vsitofp_nxv16i8_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vsext.vf4 v16, v8 ; CHECK-NEXT: vfcvt.f.x.v v8, v16 ; CHECK-NEXT: ret @@ -688,7 +688,7 @@ define @vuitofp_nxv16i8_nxv16f32( %va) { ; CHECK-LABEL: vuitofp_nxv16i8_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vzext.vf4 v16, v8 ; CHECK-NEXT: vfcvt.f.xu.v v8, v16 ; CHECK-NEXT: ret @@ -699,7 +699,7 @@ define @vsitofp_nxv32i8_nxv32f16( %va) { ; CHECK-LABEL: vsitofp_nxv32i8_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -710,7 +710,7 @@ define @vuitofp_nxv32i8_nxv32f16( %va) { ; CHECK-LABEL: vuitofp_nxv32i8_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -721,7 +721,7 @@ define @vsitofp_nxv1i16_nxv1f16( %va) { ; CHECK-LABEL: vsitofp_nxv1i16_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret %evec = sitofp %va to @@ -731,7 +731,7 @@ define @vuitofp_nxv1i16_nxv1f16( %va) { ; CHECK-LABEL: vuitofp_nxv1i16_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret %evec = uitofp %va to @@ -741,7 +741,7 @@ define @vsitofp_nxv1i16_nxv1f32( %va) { ; CHECK-LABEL: vsitofp_nxv1i16_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -752,7 +752,7 @@ define @vuitofp_nxv1i16_nxv1f32( %va) { ; CHECK-LABEL: vuitofp_nxv1i16_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -763,7 +763,7 @@ define @vsitofp_nxv1i16_nxv1f64( %va) { ; CHECK-LABEL: vsitofp_nxv1i16_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vsext.vf4 v9, v8 ; CHECK-NEXT: vfcvt.f.x.v v8, v9 ; CHECK-NEXT: ret @@ -774,7 +774,7 @@ define @vuitofp_nxv1i16_nxv1f64( %va) { ; CHECK-LABEL: 
vuitofp_nxv1i16_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vzext.vf4 v9, v8 ; CHECK-NEXT: vfcvt.f.xu.v v8, v9 ; CHECK-NEXT: ret @@ -785,7 +785,7 @@ define @vsitofp_nxv2i16_nxv2f16( %va) { ; CHECK-LABEL: vsitofp_nxv2i16_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret %evec = sitofp %va to @@ -795,7 +795,7 @@ define @vuitofp_nxv2i16_nxv2f16( %va) { ; CHECK-LABEL: vuitofp_nxv2i16_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret %evec = uitofp %va to @@ -805,7 +805,7 @@ define @vsitofp_nxv2i16_nxv2f32( %va) { ; CHECK-LABEL: vsitofp_nxv2i16_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -816,7 +816,7 @@ define @vuitofp_nxv2i16_nxv2f32( %va) { ; CHECK-LABEL: vuitofp_nxv2i16_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -827,7 +827,7 @@ define @vsitofp_nxv2i16_nxv2f64( %va) { ; CHECK-LABEL: vsitofp_nxv2i16_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vsext.vf4 v10, v8 ; CHECK-NEXT: vfcvt.f.x.v v8, v10 ; CHECK-NEXT: ret @@ -838,7 +838,7 @@ define @vuitofp_nxv2i16_nxv2f64( %va) { ; CHECK-LABEL: vuitofp_nxv2i16_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vzext.vf4 v10, v8 ; CHECK-NEXT: vfcvt.f.xu.v v8, v10 ; CHECK-NEXT: ret @@ -849,7 +849,7 @@ define @vsitofp_nxv4i16_nxv4f16( %va) { ; CHECK-LABEL: vsitofp_nxv4i16_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret %evec = sitofp %va to @@ -859,7 +859,7 @@ define @vuitofp_nxv4i16_nxv4f16( %va) { ; CHECK-LABEL: vuitofp_nxv4i16_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret %evec = uitofp %va to @@ -869,7 +869,7 @@ define @vsitofp_nxv4i16_nxv4f32( %va) { ; CHECK-LABEL: vsitofp_nxv4i16_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -880,7 +880,7 @@ define @vuitofp_nxv4i16_nxv4f32( %va) { ; CHECK-LABEL: vuitofp_nxv4i16_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -891,7 +891,7 @@ define @vsitofp_nxv4i16_nxv4f64( %va) { ; CHECK-LABEL: vsitofp_nxv4i16_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vsext.vf4 v12, v8 ; CHECK-NEXT: vfcvt.f.x.v v8, v12 ; CHECK-NEXT: ret @@ -902,7 +902,7 @@ define @vuitofp_nxv4i16_nxv4f64( %va) { ; 
CHECK-LABEL: vuitofp_nxv4i16_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vzext.vf4 v12, v8 ; CHECK-NEXT: vfcvt.f.xu.v v8, v12 ; CHECK-NEXT: ret @@ -913,7 +913,7 @@ define @vsitofp_nxv8i16_nxv8f16( %va) { ; CHECK-LABEL: vsitofp_nxv8i16_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret %evec = sitofp %va to @@ -923,7 +923,7 @@ define @vuitofp_nxv8i16_nxv8f16( %va) { ; CHECK-LABEL: vuitofp_nxv8i16_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret %evec = uitofp %va to @@ -933,7 +933,7 @@ define @vsitofp_nxv8i16_nxv8f32( %va) { ; CHECK-LABEL: vsitofp_nxv8i16_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -944,7 +944,7 @@ define @vuitofp_nxv8i16_nxv8f32( %va) { ; CHECK-LABEL: vuitofp_nxv8i16_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -955,7 +955,7 @@ define @vsitofp_nxv8i16_nxv8f64( %va) { ; CHECK-LABEL: vsitofp_nxv8i16_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vsext.vf4 v16, v8 ; CHECK-NEXT: vfcvt.f.x.v v8, v16 ; CHECK-NEXT: ret @@ -966,7 +966,7 @@ define @vuitofp_nxv8i16_nxv8f64( %va) { ; CHECK-LABEL: vuitofp_nxv8i16_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vzext.vf4 v16, v8 ; CHECK-NEXT: vfcvt.f.xu.v v8, v16 ; CHECK-NEXT: ret @@ -977,7 +977,7 @@ define @vsitofp_nxv16i16_nxv16f16( %va) { ; CHECK-LABEL: vsitofp_nxv16i16_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret %evec = sitofp %va to @@ -987,7 +987,7 @@ define @vuitofp_nxv16i16_nxv16f16( %va) { ; CHECK-LABEL: vuitofp_nxv16i16_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret %evec = uitofp %va to @@ -997,7 +997,7 @@ define @vsitofp_nxv16i16_nxv16f32( %va) { ; CHECK-LABEL: vsitofp_nxv16i16_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1008,7 +1008,7 @@ define @vuitofp_nxv16i16_nxv16f32( %va) { ; CHECK-LABEL: vuitofp_nxv16i16_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1019,7 +1019,7 @@ define @vsitofp_nxv32i16_nxv32f16( %va) { ; CHECK-LABEL: vsitofp_nxv32i16_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret %evec = sitofp %va to @@ -1029,7 +1029,7 @@ define 
@vuitofp_nxv32i16_nxv32f16( %va) { ; CHECK-LABEL: vuitofp_nxv32i16_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret %evec = uitofp %va to @@ -1039,7 +1039,7 @@ define @vsitofp_nxv1i32_nxv1f16( %va) { ; CHECK-LABEL: vsitofp_nxv1i32_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.f.x.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1050,7 +1050,7 @@ define @vuitofp_nxv1i32_nxv1f16( %va) { ; CHECK-LABEL: vuitofp_nxv1i32_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.f.xu.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1061,7 +1061,7 @@ define @vsitofp_nxv1i32_nxv1f32( %va) { ; CHECK-LABEL: vsitofp_nxv1i32_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret %evec = sitofp %va to @@ -1071,7 +1071,7 @@ define @vuitofp_nxv1i32_nxv1f32( %va) { ; CHECK-LABEL: vuitofp_nxv1i32_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret %evec = uitofp %va to @@ -1081,7 +1081,7 @@ define @vsitofp_nxv1i32_nxv1f64( %va) { ; CHECK-LABEL: vsitofp_nxv1i32_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1092,7 +1092,7 @@ define @vuitofp_nxv1i32_nxv1f64( %va) { ; CHECK-LABEL: vuitofp_nxv1i32_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1103,7 +1103,7 @@ define @vsitofp_nxv2i32_nxv2f16( %va) { ; CHECK-LABEL: vsitofp_nxv2i32_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.x.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1114,7 +1114,7 @@ define @vuitofp_nxv2i32_nxv2f16( %va) { ; CHECK-LABEL: vuitofp_nxv2i32_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.xu.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1125,7 +1125,7 @@ define @vsitofp_nxv2i32_nxv2f32( %va) { ; CHECK-LABEL: vsitofp_nxv2i32_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret %evec = sitofp %va to @@ -1135,7 +1135,7 @@ define @vuitofp_nxv2i32_nxv2f32( %va) { ; CHECK-LABEL: vuitofp_nxv2i32_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret %evec = uitofp %va to @@ -1145,7 +1145,7 @@ define @vsitofp_nxv2i32_nxv2f64( %va) { ; CHECK-LABEL: vsitofp_nxv2i32_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ 
-1156,7 +1156,7 @@ define @vuitofp_nxv2i32_nxv2f64( %va) { ; CHECK-LABEL: vuitofp_nxv2i32_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1167,7 +1167,7 @@ define @vsitofp_nxv4i32_nxv4f16( %va) { ; CHECK-LABEL: vsitofp_nxv4i32_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfncvt.f.x.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1178,7 +1178,7 @@ define @vuitofp_nxv4i32_nxv4f16( %va) { ; CHECK-LABEL: vuitofp_nxv4i32_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfncvt.f.xu.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1189,7 +1189,7 @@ define @vsitofp_nxv4i32_nxv4f32( %va) { ; CHECK-LABEL: vsitofp_nxv4i32_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret %evec = sitofp %va to @@ -1199,7 +1199,7 @@ define @vuitofp_nxv4i32_nxv4f32( %va) { ; CHECK-LABEL: vuitofp_nxv4i32_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret %evec = uitofp %va to @@ -1209,7 +1209,7 @@ define @vsitofp_nxv4i32_nxv4f64( %va) { ; CHECK-LABEL: vsitofp_nxv4i32_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1220,7 +1220,7 @@ define @vuitofp_nxv4i32_nxv4f64( %va) { ; CHECK-LABEL: vuitofp_nxv4i32_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v12, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1231,7 +1231,7 @@ define @vsitofp_nxv8i32_nxv8f16( %va) { ; CHECK-LABEL: vsitofp_nxv8i32_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfncvt.f.x.w v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1242,7 +1242,7 @@ define @vuitofp_nxv8i32_nxv8f16( %va) { ; CHECK-LABEL: vuitofp_nxv8i32_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfncvt.f.xu.w v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1253,7 +1253,7 @@ define @vsitofp_nxv8i32_nxv8f32( %va) { ; CHECK-LABEL: vsitofp_nxv8i32_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret %evec = sitofp %va to @@ -1263,7 +1263,7 @@ define @vuitofp_nxv8i32_nxv8f32( %va) { ; CHECK-LABEL: vuitofp_nxv8i32_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret %evec = uitofp %va to @@ -1273,7 +1273,7 @@ define @vsitofp_nxv8i32_nxv8f64( %va) { ; CHECK-LABEL: vsitofp_nxv8i32_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v16, v8 ; CHECK-NEXT: vmv8r.v v8, 
v16 ; CHECK-NEXT: ret @@ -1284,7 +1284,7 @@ define @vuitofp_nxv8i32_nxv8f64( %va) { ; CHECK-LABEL: vuitofp_nxv8i32_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v16, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1295,7 +1295,7 @@ define @vsitofp_nxv16i32_nxv16f16( %va) { ; CHECK-LABEL: vsitofp_nxv16i32_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfncvt.f.x.w v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1306,7 +1306,7 @@ define @vuitofp_nxv16i32_nxv16f16( %va) { ; CHECK-LABEL: vuitofp_nxv16i32_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfncvt.f.xu.w v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1317,7 +1317,7 @@ define @vsitofp_nxv16i32_nxv16f32( %va) { ; CHECK-LABEL: vsitofp_nxv16i32_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret %evec = sitofp %va to @@ -1327,7 +1327,7 @@ define @vuitofp_nxv16i32_nxv16f32( %va) { ; CHECK-LABEL: vuitofp_nxv16i32_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret %evec = uitofp %va to @@ -1337,9 +1337,9 @@ define @vsitofp_nxv1i64_nxv1f16( %va) { ; CHECK-LABEL: vsitofp_nxv1i64_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.x.w v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v8, v9 ; CHECK-NEXT: ret %evec = sitofp %va to @@ -1349,9 +1349,9 @@ define @vuitofp_nxv1i64_nxv1f16( %va) { ; CHECK-LABEL: vuitofp_nxv1i64_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.xu.w v9, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v8, v9 ; CHECK-NEXT: ret %evec = uitofp %va to @@ -1361,7 +1361,7 @@ define @vsitofp_nxv1i64_nxv1f32( %va) { ; CHECK-LABEL: vsitofp_nxv1i64_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.x.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1372,7 +1372,7 @@ define @vuitofp_nxv1i64_nxv1f32( %va) { ; CHECK-LABEL: vuitofp_nxv1i64_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.xu.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1383,7 +1383,7 @@ define @vsitofp_nxv1i64_nxv1f64( %va) { ; CHECK-LABEL: vsitofp_nxv1i64_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret %evec = sitofp %va to @@ -1393,7 +1393,7 @@ define @vuitofp_nxv1i64_nxv1f64( %va) { ; CHECK-LABEL: vuitofp_nxv1i64_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret 
%evec = uitofp %va to @@ -1403,9 +1403,9 @@ define @vsitofp_nxv2i64_nxv2f16( %va) { ; CHECK-LABEL: vsitofp_nxv2i64_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.f.x.w v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v8, v10 ; CHECK-NEXT: ret %evec = sitofp %va to @@ -1415,9 +1415,9 @@ define @vuitofp_nxv2i64_nxv2f16( %va) { ; CHECK-LABEL: vuitofp_nxv2i64_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.f.xu.w v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v8, v10 ; CHECK-NEXT: ret %evec = uitofp %va to @@ -1427,7 +1427,7 @@ define @vsitofp_nxv2i64_nxv2f32( %va) { ; CHECK-LABEL: vsitofp_nxv2i64_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.f.x.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1438,7 +1438,7 @@ define @vuitofp_nxv2i64_nxv2f32( %va) { ; CHECK-LABEL: vuitofp_nxv2i64_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.f.xu.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1449,7 +1449,7 @@ define @vsitofp_nxv2i64_nxv2f64( %va) { ; CHECK-LABEL: vsitofp_nxv2i64_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret %evec = sitofp %va to @@ -1459,7 +1459,7 @@ define @vuitofp_nxv2i64_nxv2f64( %va) { ; CHECK-LABEL: vuitofp_nxv2i64_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret %evec = uitofp %va to @@ -1469,9 +1469,9 @@ define @vsitofp_nxv4i64_nxv4f16( %va) { ; CHECK-LABEL: vsitofp_nxv4i64_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfncvt.f.x.w v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v8, v12 ; CHECK-NEXT: ret %evec = sitofp %va to @@ -1481,9 +1481,9 @@ define @vuitofp_nxv4i64_nxv4f16( %va) { ; CHECK-LABEL: vuitofp_nxv4i64_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfncvt.f.xu.w v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v8, v12 ; CHECK-NEXT: ret %evec = uitofp %va to @@ -1493,7 +1493,7 @@ define @vsitofp_nxv4i64_nxv4f32( %va) { ; CHECK-LABEL: vsitofp_nxv4i64_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfncvt.f.x.w v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1504,7 +1504,7 @@ define @vuitofp_nxv4i64_nxv4f32( %va) { ; CHECK-LABEL: vuitofp_nxv4i64_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfncvt.f.xu.w v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1515,7 
+1515,7 @@ define @vsitofp_nxv4i64_nxv4f64( %va) { ; CHECK-LABEL: vsitofp_nxv4i64_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret %evec = sitofp %va to @@ -1525,7 +1525,7 @@ define @vuitofp_nxv4i64_nxv4f64( %va) { ; CHECK-LABEL: vuitofp_nxv4i64_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret %evec = uitofp %va to @@ -1535,9 +1535,9 @@ define @vsitofp_nxv8i64_nxv8f16( %va) { ; CHECK-LABEL: vsitofp_nxv8i64_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfncvt.f.x.w v16, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v8, v16 ; CHECK-NEXT: ret %evec = sitofp %va to @@ -1547,9 +1547,9 @@ define @vuitofp_nxv8i64_nxv8f16( %va) { ; CHECK-LABEL: vuitofp_nxv8i64_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfncvt.f.xu.w v16, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v8, v16 ; CHECK-NEXT: ret %evec = uitofp %va to @@ -1559,7 +1559,7 @@ define @vsitofp_nxv8i64_nxv8f32( %va) { ; CHECK-LABEL: vsitofp_nxv8i64_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfncvt.f.x.w v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1570,7 +1570,7 @@ define @vuitofp_nxv8i64_nxv8f32( %va) { ; CHECK-LABEL: vuitofp_nxv8i64_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfncvt.f.xu.w v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1581,7 +1581,7 @@ define @vsitofp_nxv8i64_nxv8f64( %va) { ; CHECK-LABEL: vsitofp_nxv8i64_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret %evec = sitofp %va to @@ -1591,7 +1591,7 @@ define @vuitofp_nxv8i64_nxv8f64( %va) { ; CHECK-LABEL: vuitofp_nxv8i64_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret %evec = uitofp %va to diff --git a/llvm/test/CodeGen/RISCV/rvv/vle.ll b/llvm/test/CodeGen/RISCV/rvv/vle.ll --- a/llvm/test/CodeGen/RISCV/rvv/vle.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vle.ll @@ -11,7 +11,7 @@ define @intrinsic_vle_v_nxv1i64_nxv1i64(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -54,7 +54,7 @@ define @intrinsic_vle_v_nxv2i64_nxv2i64(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -97,7 +97,7 @@ define @intrinsic_vle_v_nxv4i64_nxv4i64(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv4i64_nxv4i64: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -140,7 +140,7 @@ define @intrinsic_vle_v_nxv8i64_nxv8i64(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -183,7 +183,7 @@ define @intrinsic_vle_v_nxv1f64_nxv1f64(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -226,7 +226,7 @@ define @intrinsic_vle_v_nxv2f64_nxv2f64(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ define @intrinsic_vle_v_nxv4f64_nxv4f64(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -312,7 +312,7 @@ define @intrinsic_vle_v_nxv8f64_nxv8f64(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -355,7 +355,7 @@ define @intrinsic_vle_v_nxv1i32_nxv1i32(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -398,7 +398,7 @@ define @intrinsic_vle_v_nxv2i32_nxv2i32(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -441,7 +441,7 @@ define @intrinsic_vle_v_nxv4i32_nxv4i32(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -484,7 +484,7 @@ define @intrinsic_vle_v_nxv8i32_nxv8i32(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -527,7 +527,7 @@ define @intrinsic_vle_v_nxv16i32_nxv16i32(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -570,7 +570,7 @@ define @intrinsic_vle_v_nxv1f32_nxv1f32(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, 
mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -613,7 +613,7 @@ define @intrinsic_vle_v_nxv2f32_nxv2f32(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -656,7 +656,7 @@ define @intrinsic_vle_v_nxv4f32_nxv4f32(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -699,7 +699,7 @@ define @intrinsic_vle_v_nxv8f32_nxv8f32(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -742,7 +742,7 @@ define @intrinsic_vle_v_nxv16f32_nxv16f32(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -785,7 +785,7 @@ define @intrinsic_vle_v_nxv1i16_nxv1i16(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -828,7 +828,7 @@ define @intrinsic_vle_v_nxv2i16_nxv2i16(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -871,7 +871,7 @@ define @intrinsic_vle_v_nxv4i16_nxv4i16(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -914,7 +914,7 @@ define @intrinsic_vle_v_nxv8i16_nxv8i16(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -957,7 +957,7 @@ define @intrinsic_vle_v_nxv16i16_nxv16i16(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vle_v_nxv32i16_nxv32i16(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1043,7 +1043,7 @@ define @intrinsic_vle_v_nxv1f16_nxv1f16(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; 
CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1086,7 +1086,7 @@ define @intrinsic_vle_v_nxv2f16_nxv2f16(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1129,7 +1129,7 @@ define @intrinsic_vle_v_nxv4f16_nxv4f16(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1172,7 +1172,7 @@ define @intrinsic_vle_v_nxv8f16_nxv8f16(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1215,7 +1215,7 @@ define @intrinsic_vle_v_nxv16f16_nxv16f16(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1258,7 +1258,7 @@ define @intrinsic_vle_v_nxv32f16_nxv32f16(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1301,7 +1301,7 @@ define @intrinsic_vle_v_nxv1i8_nxv1i8(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1344,7 +1344,7 @@ define @intrinsic_vle_v_nxv2i8_nxv2i8(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1387,7 +1387,7 @@ define @intrinsic_vle_v_nxv4i8_nxv4i8(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1430,7 +1430,7 @@ define @intrinsic_vle_v_nxv8i8_nxv8i8(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1473,7 +1473,7 @@ define @intrinsic_vle_v_nxv16i8_nxv16i8(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1516,7 +1516,7 @@ define @intrinsic_vle_v_nxv32i8_nxv32i8(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1559,7 +1559,7 
@@ define @intrinsic_vle_v_nxv64i8_nxv64i8(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vle_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vle_vid-vfcvt.ll b/llvm/test/CodeGen/RISCV/rvv/vle_vid-vfcvt.ll --- a/llvm/test/CodeGen/RISCV/rvv/vle_vid-vfcvt.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vle_vid-vfcvt.ll @@ -6,7 +6,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: lui a1, %hi(.LCPI0_0) ; CHECK-NEXT: addi a1, a1, %lo(.LCPI0_0) -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret @@ -20,7 +20,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: lui a1, %hi(.LCPI1_0) ; CHECK-NEXT: addi a1, a1, %lo(.LCPI1_0) -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret @@ -32,7 +32,7 @@ define void @foo_3(ptr nocapture noundef writeonly %t) { ; CHECK-LABEL: foo_3: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 @@ -46,7 +46,7 @@ define void @foo_4(ptr nocapture noundef writeonly %t) { ; CHECK-LABEL: foo_4: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vsll.vi v8, v8, 10 ; CHECK-NEXT: vadd.vi v8, v8, -16 @@ -61,7 +61,7 @@ define void @foo_5(ptr nocapture noundef writeonly %t) { ; CHECK-LABEL: foo_5: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vid.v v8 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: vse32.v v8, (a0) @@ -76,7 +76,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: lui a1, %hi(.LCPI5_0) ; CHECK-NEXT: addi a1, a1, %lo(.LCPI5_0) -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret @@ -90,7 +90,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: lui a1, %hi(.LCPI6_0) ; CHECK-NEXT: addi a1, a1, %lo(.LCPI6_0) -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret @@ -104,7 +104,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: lui a1, %hi(.LCPI7_0) ; CHECK-NEXT: addi a1, a1, %lo(.LCPI7_0) -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll @@ -9,7 +9,7 @@ define @intrinsic_vleff_v_nxv1i64_nxv1i64(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vle64ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -61,7 +61,7 @@ define @intrinsic_vleff_v_nxv2i64_nxv2i64(* %0, i32 
%1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vle64ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -113,7 +113,7 @@ define @intrinsic_vleff_v_nxv4i64_nxv4i64(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vle64ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -165,7 +165,7 @@ define @intrinsic_vleff_v_nxv8i64_nxv8i64(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vle64ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -217,7 +217,7 @@ define @intrinsic_vleff_v_nxv1f64_nxv1f64(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vle64ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -269,7 +269,7 @@ define @intrinsic_vleff_v_nxv2f64_nxv2f64(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vle64ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -321,7 +321,7 @@ define @intrinsic_vleff_v_nxv4f64_nxv4f64(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vle64ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -373,7 +373,7 @@ define @intrinsic_vleff_v_nxv8f64_nxv8f64(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vle64ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -425,7 +425,7 @@ define @intrinsic_vleff_v_nxv1i32_nxv1i32(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vle32ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -477,7 +477,7 @@ define @intrinsic_vleff_v_nxv2i32_nxv2i32(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vle32ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -529,7 +529,7 @@ define @intrinsic_vleff_v_nxv4i32_nxv4i32(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vle32ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -581,7 +581,7 @@ define 
@intrinsic_vleff_v_nxv8i32_nxv8i32(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vle32ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -633,7 +633,7 @@ define @intrinsic_vleff_v_nxv16i32_nxv16i32(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vle32ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -685,7 +685,7 @@ define @intrinsic_vleff_v_nxv1f32_nxv1f32(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vle32ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -737,7 +737,7 @@ define @intrinsic_vleff_v_nxv2f32_nxv2f32(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vle32ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -789,7 +789,7 @@ define @intrinsic_vleff_v_nxv4f32_nxv4f32(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vle32ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -841,7 +841,7 @@ define @intrinsic_vleff_v_nxv8f32_nxv8f32(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vle32ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -893,7 +893,7 @@ define @intrinsic_vleff_v_nxv16f32_nxv16f32(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vle32ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -945,7 +945,7 @@ define @intrinsic_vleff_v_nxv1i16_nxv1i16(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vle16ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -997,7 +997,7 @@ define @intrinsic_vleff_v_nxv2i16_nxv2i16(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vle16ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1049,7 +1049,7 @@ define @intrinsic_vleff_v_nxv4i16_nxv4i16(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vle16ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 
0(a2) @@ -1101,7 +1101,7 @@ define @intrinsic_vleff_v_nxv8i16_nxv8i16(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vle16ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1153,7 +1153,7 @@ define @intrinsic_vleff_v_nxv16i16_nxv16i16(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vle16ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1205,7 +1205,7 @@ define @intrinsic_vleff_v_nxv32i16_nxv32i16(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vle16ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1257,7 +1257,7 @@ define @intrinsic_vleff_v_nxv1half_nxv1f16(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv1half_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vle16ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1309,7 +1309,7 @@ define @intrinsic_vleff_v_nxv2half_nxv2f16(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv2half_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vle16ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1361,7 +1361,7 @@ define @intrinsic_vleff_v_nxv4half_nxv4f16(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv4half_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vle16ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1413,7 +1413,7 @@ define @intrinsic_vleff_v_nxv8half_nxv8f16(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv8half_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vle16ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1465,7 +1465,7 @@ define @intrinsic_vleff_v_nxv16half_nxv16f16(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv16half_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vle16ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1517,7 +1517,7 @@ define @intrinsic_vleff_v_nxv32half_nxv32f16(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv32half_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vle16ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1569,7 +1569,7 @@ define @intrinsic_vleff_v_nxv1i8_nxv1i8(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: 
vle8ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1621,7 +1621,7 @@ define @intrinsic_vleff_v_nxv2i8_nxv2i8(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vle8ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1673,7 +1673,7 @@ define @intrinsic_vleff_v_nxv4i8_nxv4i8(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vle8ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1725,7 +1725,7 @@ define @intrinsic_vleff_v_nxv8i8_nxv8i8(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vle8ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1777,7 +1777,7 @@ define @intrinsic_vleff_v_nxv16i8_nxv16i8(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vle8ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1829,7 +1829,7 @@ define @intrinsic_vleff_v_nxv32i8_nxv32i8(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vle8ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1881,7 +1881,7 @@ define @intrinsic_vleff_v_nxv64i8_nxv64i8(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vle8ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1929,7 +1929,7 @@ define @intrinsic_vleff_dead_vl(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_dead_vl: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vle64ff.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1962,7 +1962,7 @@ define void @intrinsic_vleff_dead_value(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_dead_value: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vle64ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2001,7 +2001,7 @@ define void @intrinsic_vleff_dead_all(* %0, i32 %1, i32* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_dead_all: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vle64ff.v v8, (a0) ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll @@ -9,7 +9,7 @@ define @intrinsic_vleff_v_nxv1i64_nxv1i64(* %0, i64 %1, i64* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vle64ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -61,7 +61,7 @@ define @intrinsic_vleff_v_nxv2i64_nxv2i64(* %0, i64 %1, i64* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vle64ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -113,7 +113,7 @@ define @intrinsic_vleff_v_nxv4i64_nxv4i64(* %0, i64 %1, i64* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vle64ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -165,7 +165,7 @@ define @intrinsic_vleff_v_nxv8i64_nxv8i64(* %0, i64 %1, i64* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vle64ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -217,7 +217,7 @@ define @intrinsic_vleff_v_nxv1f64_nxv1f64(* %0, i64 %1, i64* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vle64ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -269,7 +269,7 @@ define @intrinsic_vleff_v_nxv2f64_nxv2f64(* %0, i64 %1, i64* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vle64ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -321,7 +321,7 @@ define @intrinsic_vleff_v_nxv4f64_nxv4f64(* %0, i64 %1, i64* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vle64ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -373,7 +373,7 @@ define @intrinsic_vleff_v_nxv8f64_nxv8f64(* %0, i64 %1, i64* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vle64ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -425,7 +425,7 @@ define @intrinsic_vleff_v_nxv1i32_nxv1i32(* %0, i64 %1, i64* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vle32ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -477,7 +477,7 @@ define @intrinsic_vleff_v_nxv2i32_nxv2i32(* %0, i64 %1, i64* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vle32ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -529,7 +529,7 @@ define @intrinsic_vleff_v_nxv4i32_nxv4i32(* %0, i64 %1, i64* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv4i32_nxv4i32: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vle32ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -581,7 +581,7 @@ define @intrinsic_vleff_v_nxv8i32_nxv8i32(* %0, i64 %1, i64* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vle32ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -633,7 +633,7 @@ define @intrinsic_vleff_v_nxv16i32_nxv16i32(* %0, i64 %1, i64* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vle32ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -685,7 +685,7 @@ define @intrinsic_vleff_v_nxv1f32_nxv1f32(* %0, i64 %1, i64* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vle32ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -737,7 +737,7 @@ define @intrinsic_vleff_v_nxv2f32_nxv2f32(* %0, i64 %1, i64* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vle32ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -789,7 +789,7 @@ define @intrinsic_vleff_v_nxv4f32_nxv4f32(* %0, i64 %1, i64* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vle32ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -841,7 +841,7 @@ define @intrinsic_vleff_v_nxv8f32_nxv8f32(* %0, i64 %1, i64* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vle32ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -893,7 +893,7 @@ define @intrinsic_vleff_v_nxv16f32_nxv16f32(* %0, i64 %1, i64* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vle32ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -945,7 +945,7 @@ define @intrinsic_vleff_v_nxv1i16_nxv1i16(* %0, i64 %1, i64* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vle16ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -997,7 +997,7 @@ define @intrinsic_vleff_v_nxv2i16_nxv2i16(* %0, i64 %1, i64* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vle16ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1049,7 +1049,7 @@ define @intrinsic_vleff_v_nxv4i16_nxv4i16(* %0, i64 %1, i64* %2) nounwind { ; CHECK-LABEL: 
intrinsic_vleff_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vle16ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1101,7 +1101,7 @@ define @intrinsic_vleff_v_nxv8i16_nxv8i16(* %0, i64 %1, i64* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vle16ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1153,7 +1153,7 @@ define @intrinsic_vleff_v_nxv16i16_nxv16i16(* %0, i64 %1, i64* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vle16ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1205,7 +1205,7 @@ define @intrinsic_vleff_v_nxv32i16_nxv32i16(* %0, i64 %1, i64* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vle16ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1257,7 +1257,7 @@ define @intrinsic_vleff_v_nxv1half_nxv1f16(* %0, i64 %1, i64* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv1half_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vle16ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1309,7 +1309,7 @@ define @intrinsic_vleff_v_nxv2half_nxv2f16(* %0, i64 %1, i64* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv2half_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vle16ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1361,7 +1361,7 @@ define @intrinsic_vleff_v_nxv4half_nxv4f16(* %0, i64 %1, i64* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv4half_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vle16ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1413,7 +1413,7 @@ define @intrinsic_vleff_v_nxv8half_nxv8f16(* %0, i64 %1, i64* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv8half_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vle16ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1465,7 +1465,7 @@ define @intrinsic_vleff_v_nxv16half_nxv16f16(* %0, i64 %1, i64* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv16half_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vle16ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1517,7 +1517,7 @@ define @intrinsic_vleff_v_nxv32half_nxv32f16(* %0, i64 %1, i64* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv32half_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vle16ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1569,7 +1569,7 @@ define 
@intrinsic_vleff_v_nxv1i8_nxv1i8(* %0, i64 %1, i64* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vle8ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1621,7 +1621,7 @@ define @intrinsic_vleff_v_nxv2i8_nxv2i8(* %0, i64 %1, i64* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vle8ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1673,7 +1673,7 @@ define @intrinsic_vleff_v_nxv4i8_nxv4i8(* %0, i64 %1, i64* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vle8ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1725,7 +1725,7 @@ define @intrinsic_vleff_v_nxv8i8_nxv8i8(* %0, i64 %1, i64* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vle8ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1777,7 +1777,7 @@ define @intrinsic_vleff_v_nxv16i8_nxv16i8(* %0, i64 %1, i64* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vle8ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1829,7 +1829,7 @@ define @intrinsic_vleff_v_nxv32i8_nxv32i8(* %0, i64 %1, i64* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vle8ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1881,7 +1881,7 @@ define @intrinsic_vleff_v_nxv64i8_nxv64i8(* %0, i64 %1, i64* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vle8ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1929,7 +1929,7 @@ define @intrinsic_vleff_dead_vl(* %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vleff_dead_vl: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vle64ff.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1962,7 +1962,7 @@ define void @intrinsic_vleff_dead_value(* %0, i64 %1, i64* %2) nounwind { ; CHECK-LABEL: intrinsic_vleff_dead_value: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vle64ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2001,7 +2001,7 @@ define void @intrinsic_vleff_dead_all(* %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vleff_dead_all: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vle64ff.v v8, (a0) ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vlm.ll b/llvm/test/CodeGen/RISCV/rvv/vlm.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlm.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vlm.ll @@ -9,7 +9,7 @@ define @intrinsic_vlm_v_nxv1i1(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vlm_v_nxv1i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: ret entry: @@ -22,7 +22,7 @@ define @intrinsic_vlm_v_nxv2i1(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vlm_v_nxv2i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: ret entry: @@ -35,7 +35,7 @@ define @intrinsic_vlm_v_nxv4i1(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vlm_v_nxv4i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: ret entry: @@ -48,7 +48,7 @@ define @intrinsic_vlm_v_nxv8i1(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vlm_v_nxv8i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: ret entry: @@ -61,7 +61,7 @@ define @intrinsic_vlm_v_nxv16i1(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vlm_v_nxv16i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: ret entry: @@ -74,7 +74,7 @@ define @intrinsic_vlm_v_nxv32i1(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vlm_v_nxv32i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: ret entry: @@ -87,7 +87,7 @@ define @intrinsic_vlm_v_nxv64i1(* %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vlm_v_nxv64i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vlm.v v0, (a0) ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll @@ -13,7 +13,7 @@ define @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -61,7 +61,7 @@ define @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -109,7 +109,7 @@ define @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v12 ; CHECK-NEXT: ret @@ -157,7 +157,7 @@ define @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxei64.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -205,7 +205,7 @@ define @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -253,7 +253,7 @@ define @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -301,7 +301,7 @@ define @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxei64.v v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -349,7 +349,7 @@ define @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxei64.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -397,7 +397,7 @@ define @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -445,7 +445,7 @@ define @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxei64.v v10, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -493,7 +493,7 @@ define @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxei64.v v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -541,7 +541,7 @@ define @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vloxei64.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -589,7 +589,7 @@ define @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxei64.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -636,7 +636,7 @@ define 
@intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxei64.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -683,7 +683,7 @@ define @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vloxei64.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -730,7 +730,7 @@ define @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vloxei64.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -777,7 +777,7 @@ define @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -825,7 +825,7 @@ define @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -873,7 +873,7 @@ define @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxei64.v v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -921,7 +921,7 @@ define @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxei64.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -969,7 +969,7 @@ define @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1017,7 +1017,7 @@ define @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxei64.v v10, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1065,7 +1065,7 @@ define @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxei64.v 
v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1113,7 +1113,7 @@ define @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vloxei64.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1161,7 +1161,7 @@ define @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxei64.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -1208,7 +1208,7 @@ define @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxei64.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -1255,7 +1255,7 @@ define @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vloxei64.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -1302,7 +1302,7 @@ define @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vloxei64.v v8, (a0), v8 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxei.ll b/llvm/test/CodeGen/RISCV/rvv/vloxei.ll --- a/llvm/test/CodeGen/RISCV/rvv/vloxei.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vloxei.ll @@ -13,7 +13,7 @@ define @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -61,7 +61,7 @@ define @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -109,7 +109,7 @@ define @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -157,7 +157,7 @@ define @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxei32.v v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -205,7 +205,7 @@ define @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i32(* %0, 
%1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vloxei32.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -253,7 +253,7 @@ define @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -301,7 +301,7 @@ define @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -349,7 +349,7 @@ define @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxei32.v v10, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -397,7 +397,7 @@ define @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxei32.v v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -445,7 +445,7 @@ define @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vloxei32.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -493,7 +493,7 @@ define @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxei32.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -540,7 +540,7 @@ define @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxei32.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -587,7 +587,7 @@ define @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxei32.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -634,7 +634,7 @@ define @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vloxei32.v v8, (a0), v8 ; CHECK-NEXT: 
ret entry: @@ -681,7 +681,7 @@ define @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vloxei32.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -728,7 +728,7 @@ define @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxei32.v v9, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -776,7 +776,7 @@ define @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxei32.v v10, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -824,7 +824,7 @@ define @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vloxei32.v v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -872,7 +872,7 @@ define @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vloxei32.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -920,7 +920,7 @@ define @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -968,7 +968,7 @@ define @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1016,7 +1016,7 @@ define @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxei32.v v10, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1064,7 +1064,7 @@ define @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxei32.v v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1112,7 +1112,7 @@ define @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vloxei32.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1160,7 +1160,7 @@ define @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxei32.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -1207,7 +1207,7 @@ define @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxei32.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -1254,7 +1254,7 @@ define @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxei32.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -1301,7 +1301,7 @@ define @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vloxei32.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -1348,7 +1348,7 @@ define @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vloxei32.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -1395,7 +1395,7 @@ define @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxei32.v v9, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -1443,7 +1443,7 @@ define @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxei32.v v10, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1491,7 +1491,7 @@ define @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vloxei32.v v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1539,7 +1539,7 @@ define @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vloxei32.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1587,7 +1587,7 @@ define @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i16(* %0, %1, iXLen %2) nounwind { ; 
CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1635,7 +1635,7 @@ define @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1683,7 +1683,7 @@ define @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1731,7 +1731,7 @@ define @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxei16.v v10, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1779,7 +1779,7 @@ define @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vloxei16.v v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1827,7 +1827,7 @@ define @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vloxei16.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1875,7 +1875,7 @@ define @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxei16.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -1922,7 +1922,7 @@ define @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxei16.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -1969,7 +1969,7 @@ define @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxei16.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -2016,7 +2016,7 @@ define @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxei16.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -2063,7 +2063,7 @@ 
define @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vloxei16.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -2110,7 +2110,7 @@ define @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vloxei16.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -2157,7 +2157,7 @@ define @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -2205,7 +2205,7 @@ define @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxei16.v v9, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -2253,7 +2253,7 @@ define @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxei16.v v10, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -2301,7 +2301,7 @@ define @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vloxei16.v v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -2349,7 +2349,7 @@ define @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vloxei16.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -2397,7 +2397,7 @@ define @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxei16.v v9, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -2445,7 +2445,7 @@ define @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxei16.v v10, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -2493,7 +2493,7 @@ define @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, 
mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vloxei16.v v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -2541,7 +2541,7 @@ define @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vloxei16.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -2589,7 +2589,7 @@ define @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxei16.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -2636,7 +2636,7 @@ define @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxei16.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -2683,7 +2683,7 @@ define @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxei16.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -2730,7 +2730,7 @@ define @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxei16.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -2777,7 +2777,7 @@ define @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vloxei16.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -2824,7 +2824,7 @@ define @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vloxei16.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -2871,7 +2871,7 @@ define @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -2919,7 +2919,7 @@ define @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxei16.v v9, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -2967,7 +2967,7 @@ define @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i16: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxei16.v v10, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -3015,7 +3015,7 @@ define @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vloxei16.v v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -3063,7 +3063,7 @@ define @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vloxei16.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -3111,7 +3111,7 @@ define @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxei16.v v9, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -3159,7 +3159,7 @@ define @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxei16.v v10, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -3207,7 +3207,7 @@ define @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vloxei16.v v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -3255,7 +3255,7 @@ define @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vloxei16.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -3303,7 +3303,7 @@ define @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxei8.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -3350,7 +3350,7 @@ define @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxei8.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -3397,7 +3397,7 @@ define @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxei8.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -3444,7 +3444,7 @@ define 
@intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxei8.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -3491,7 +3491,7 @@ define @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vloxei8.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -3538,7 +3538,7 @@ define @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vloxei8.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -3585,7 +3585,7 @@ define @intrinsic_vloxei_v_nxv64i8_nxv64i8_nxv64i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vloxei8.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -3632,7 +3632,7 @@ define @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -3680,7 +3680,7 @@ define @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -3728,7 +3728,7 @@ define @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxei8.v v9, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -3776,7 +3776,7 @@ define @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxei8.v v10, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -3824,7 +3824,7 @@ define @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vloxei8.v v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -3872,7 +3872,7 @@ define @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vloxei8.v v16, (a0), v8 ; 
CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -3920,7 +3920,7 @@ define @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -3968,7 +3968,7 @@ define @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxei8.v v9, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -4016,7 +4016,7 @@ define @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxei8.v v10, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -4064,7 +4064,7 @@ define @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vloxei8.v v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -4112,7 +4112,7 @@ define @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vloxei8.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -4160,7 +4160,7 @@ define @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxei8.v v9, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -4208,7 +4208,7 @@ define @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxei8.v v10, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -4256,7 +4256,7 @@ define @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vloxei8.v v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -4304,7 +4304,7 @@ define @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vloxei8.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -4352,7 +4352,7 @@ define @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -4400,7 +4400,7 @@ define @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -4448,7 +4448,7 @@ define @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxei8.v v9, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -4496,7 +4496,7 @@ define @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxei8.v v10, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -4544,7 +4544,7 @@ define @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vloxei8.v v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -4592,7 +4592,7 @@ define @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vloxei8.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -4640,7 +4640,7 @@ define @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -4688,7 +4688,7 @@ define @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxei8.v v9, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -4736,7 +4736,7 @@ define @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxei8.v v10, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -4784,7 +4784,7 @@ define @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vloxei8.v v12, 
(a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -4832,7 +4832,7 @@ define @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vloxei8.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -4880,7 +4880,7 @@ define @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxei8.v v9, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -4928,7 +4928,7 @@ define @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxei8.v v10, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -4976,7 +4976,7 @@ define @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vloxei8.v v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -5024,7 +5024,7 @@ define @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vloxei8.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll @@ -8,7 +8,7 @@ define @test_vloxseg2_nxv16i16_nxv16i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -38,7 +38,7 @@ define @test_vloxseg2_nxv16i16_nxv16i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -68,7 +68,7 @@ define @test_vloxseg2_nxv16i16_nxv16i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret @@ -98,7 +98,7 @@ define @test_vloxseg2_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -128,7 +128,7 @@ define 
@test_vloxseg2_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -158,7 +158,7 @@ define @test_vloxseg2_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -188,7 +188,7 @@ define @test_vloxseg3_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -220,7 +220,7 @@ define @test_vloxseg3_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -252,7 +252,7 @@ define @test_vloxseg3_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -284,7 +284,7 @@ define @test_vloxseg4_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -317,7 +317,7 @@ define @test_vloxseg4_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -350,7 +350,7 @@ define @test_vloxseg4_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -383,7 +383,7 @@ define @test_vloxseg5_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -417,7 +417,7 @@ define @test_vloxseg5_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -451,7 +451,7 @@ define @test_vloxseg5_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg5_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -485,7 +485,7 @@ define @test_vloxseg6_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -520,7 +520,7 @@ define @test_vloxseg6_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -555,7 +555,7 @@ define @test_vloxseg6_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -590,7 +590,7 @@ define @test_vloxseg7_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -626,7 +626,7 @@ define @test_vloxseg7_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -662,7 +662,7 @@ define @test_vloxseg7_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -698,7 +698,7 @@ define @test_vloxseg8_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -735,7 +735,7 @@ define @test_vloxseg8_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -772,7 +772,7 @@ define @test_vloxseg8_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -809,7 +809,7 @@ define @test_vloxseg2_nxv16i8_nxv16i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, 
e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -839,7 +839,7 @@ define @test_vloxseg2_nxv16i8_nxv16i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -869,7 +869,7 @@ define @test_vloxseg2_nxv16i8_nxv16i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret @@ -899,7 +899,7 @@ define @test_vloxseg3_nxv16i8_nxv16i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -930,7 +930,7 @@ define @test_vloxseg3_nxv16i8_nxv16i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -962,7 +962,7 @@ define @test_vloxseg3_nxv16i8_nxv16i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v16, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret @@ -993,7 +993,7 @@ define @test_vloxseg4_nxv16i8_nxv16i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -1026,7 +1026,7 @@ define @test_vloxseg4_nxv16i8_nxv16i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -1059,7 +1059,7 @@ define @test_vloxseg4_nxv16i8_nxv16i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret @@ -1091,7 +1091,7 @@ define @test_vloxseg2_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1121,7 +1121,7 @@ define @test_vloxseg2_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, 
ta, ma ; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1151,7 +1151,7 @@ define @test_vloxseg2_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1181,7 +1181,7 @@ define @test_vloxseg3_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1213,7 +1213,7 @@ define @test_vloxseg3_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1245,7 +1245,7 @@ define @test_vloxseg3_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1277,7 +1277,7 @@ define @test_vloxseg4_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1310,7 +1310,7 @@ define @test_vloxseg4_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1343,7 +1343,7 @@ define @test_vloxseg4_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1376,7 +1376,7 @@ define @test_vloxseg5_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1410,7 +1410,7 @@ define @test_vloxseg5_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1444,7 +1444,7 @@ define @test_vloxseg5_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg5ei16.v v9, 
(a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1478,7 +1478,7 @@ define @test_vloxseg6_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1513,7 +1513,7 @@ define @test_vloxseg6_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1548,7 +1548,7 @@ define @test_vloxseg6_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1583,7 +1583,7 @@ define @test_vloxseg7_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1619,7 +1619,7 @@ define @test_vloxseg7_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1655,7 +1655,7 @@ define @test_vloxseg7_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1691,7 +1691,7 @@ define @test_vloxseg8_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1728,7 +1728,7 @@ define @test_vloxseg8_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1765,7 +1765,7 @@ define @test_vloxseg8_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1802,7 +1802,7 @@ define @test_vloxseg2_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; 
CHECK-NEXT: ret @@ -1832,7 +1832,7 @@ define @test_vloxseg2_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1862,7 +1862,7 @@ define @test_vloxseg2_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -1892,7 +1892,7 @@ define @test_vloxseg3_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1924,7 +1924,7 @@ define @test_vloxseg3_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1956,7 +1956,7 @@ define @test_vloxseg3_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -1987,7 +1987,7 @@ define @test_vloxseg4_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2020,7 +2020,7 @@ define @test_vloxseg4_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2053,7 +2053,7 @@ define @test_vloxseg4_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -2086,7 +2086,7 @@ define @test_vloxseg5_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2120,7 +2120,7 @@ define @test_vloxseg5_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2154,7 +2154,7 @@ 
define @test_vloxseg5_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -2188,7 +2188,7 @@ define @test_vloxseg6_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2223,7 +2223,7 @@ define @test_vloxseg6_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2258,7 +2258,7 @@ define @test_vloxseg6_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -2293,7 +2293,7 @@ define @test_vloxseg7_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2329,7 +2329,7 @@ define @test_vloxseg7_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2365,7 +2365,7 @@ define @test_vloxseg7_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -2401,7 +2401,7 @@ define @test_vloxseg8_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2438,7 +2438,7 @@ define @test_vloxseg8_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2475,7 +2475,7 @@ define @test_vloxseg8_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -2512,7 +2512,7 @@ define 
@test_vloxseg2_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2542,7 +2542,7 @@ define @test_vloxseg2_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2572,7 +2572,7 @@ define @test_vloxseg2_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2602,7 +2602,7 @@ define @test_vloxseg3_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2634,7 +2634,7 @@ define @test_vloxseg3_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2666,7 +2666,7 @@ define @test_vloxseg3_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2698,7 +2698,7 @@ define @test_vloxseg4_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2731,7 +2731,7 @@ define @test_vloxseg4_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2764,7 +2764,7 @@ define @test_vloxseg4_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2797,7 +2797,7 @@ define @test_vloxseg5_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2831,7 +2831,7 @@ define 
@test_vloxseg5_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2865,7 +2865,7 @@ define @test_vloxseg5_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2899,7 +2899,7 @@ define @test_vloxseg6_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2934,7 +2934,7 @@ define @test_vloxseg6_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2969,7 +2969,7 @@ define @test_vloxseg6_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3004,7 +3004,7 @@ define @test_vloxseg7_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3040,7 +3040,7 @@ define @test_vloxseg7_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3076,7 +3076,7 @@ define @test_vloxseg7_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3112,7 +3112,7 @@ define @test_vloxseg8_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3149,7 +3149,7 @@ define @test_vloxseg8_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3186,7 +3186,7 @@ define 
@test_vloxseg8_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3223,7 +3223,7 @@ define @test_vloxseg2_nxv8i16_nxv8i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -3253,7 +3253,7 @@ define @test_vloxseg2_nxv8i16_nxv8i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -3283,7 +3283,7 @@ define @test_vloxseg2_nxv8i16_nxv8i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -3313,7 +3313,7 @@ define @test_vloxseg3_nxv8i16_nxv8i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -3345,7 +3345,7 @@ define @test_vloxseg3_nxv8i16_nxv8i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -3377,7 +3377,7 @@ define @test_vloxseg3_nxv8i16_nxv8i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -3408,7 +3408,7 @@ define @test_vloxseg4_nxv8i16_nxv8i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -3441,7 +3441,7 @@ define @test_vloxseg4_nxv8i16_nxv8i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -3474,7 +3474,7 @@ define @test_vloxseg4_nxv8i16_nxv8i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -3507,7 +3507,7 @@ define 
@test_vloxseg2_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -3537,7 +3537,7 @@ define @test_vloxseg2_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3567,7 +3567,7 @@ define @test_vloxseg2_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -3597,7 +3597,7 @@ define @test_vloxseg3_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -3628,7 +3628,7 @@ define @test_vloxseg3_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3660,7 +3660,7 @@ define @test_vloxseg3_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -3691,7 +3691,7 @@ define @test_vloxseg4_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -3724,7 +3724,7 @@ define @test_vloxseg4_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3757,7 +3757,7 @@ define @test_vloxseg4_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -3789,7 +3789,7 @@ define @test_vloxseg5_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -3823,7 +3823,7 @@ define @test_vloxseg5_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vloxseg5_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3857,7 +3857,7 @@ define @test_vloxseg5_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg5ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -3890,7 +3890,7 @@ define @test_vloxseg6_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -3925,7 +3925,7 @@ define @test_vloxseg6_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3960,7 +3960,7 @@ define @test_vloxseg6_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg6ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -3995,7 +3995,7 @@ define @test_vloxseg7_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -4031,7 +4031,7 @@ define @test_vloxseg7_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4067,7 +4067,7 @@ define @test_vloxseg7_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg7ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -4103,7 +4103,7 @@ define @test_vloxseg8_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -4140,7 +4140,7 @@ define @test_vloxseg8_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4177,7 +4177,7 @@ define @test_vloxseg8_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, 
e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -4214,7 +4214,7 @@ define @test_vloxseg2_nxv8i32_nxv8i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -4244,7 +4244,7 @@ define @test_vloxseg2_nxv8i32_nxv8i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -4274,7 +4274,7 @@ define @test_vloxseg2_nxv8i32_nxv8i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -4304,7 +4304,7 @@ define @test_vloxseg2_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4334,7 +4334,7 @@ define @test_vloxseg2_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4364,7 +4364,7 @@ define @test_vloxseg2_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -4394,7 +4394,7 @@ define @test_vloxseg3_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4426,7 +4426,7 @@ define @test_vloxseg3_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4458,7 +4458,7 @@ define @test_vloxseg3_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -4489,7 +4489,7 @@ define @test_vloxseg4_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, 
ta, ma ; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4522,7 +4522,7 @@ define @test_vloxseg4_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4555,7 +4555,7 @@ define @test_vloxseg4_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -4588,7 +4588,7 @@ define @test_vloxseg5_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4622,7 +4622,7 @@ define @test_vloxseg5_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4656,7 +4656,7 @@ define @test_vloxseg5_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -4690,7 +4690,7 @@ define @test_vloxseg6_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4725,7 +4725,7 @@ define @test_vloxseg6_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4760,7 +4760,7 @@ define @test_vloxseg6_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -4795,7 +4795,7 @@ define @test_vloxseg7_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4831,7 +4831,7 @@ define @test_vloxseg7_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v 
v8, v10 ; CHECK-NEXT: ret @@ -4867,7 +4867,7 @@ define @test_vloxseg7_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -4903,7 +4903,7 @@ define @test_vloxseg8_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4940,7 +4940,7 @@ define @test_vloxseg8_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4977,7 +4977,7 @@ define @test_vloxseg8_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -5014,7 +5014,7 @@ define @test_vloxseg2_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5044,7 +5044,7 @@ define @test_vloxseg2_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5074,7 +5074,7 @@ define @test_vloxseg2_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5104,7 +5104,7 @@ define @test_vloxseg3_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5136,7 +5136,7 @@ define @test_vloxseg3_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5168,7 +5168,7 @@ define @test_vloxseg3_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5200,7 
+5200,7 @@ define @test_vloxseg4_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5233,7 +5233,7 @@ define @test_vloxseg4_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5266,7 +5266,7 @@ define @test_vloxseg4_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5299,7 +5299,7 @@ define @test_vloxseg5_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5333,7 +5333,7 @@ define @test_vloxseg5_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5367,7 +5367,7 @@ define @test_vloxseg5_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5401,7 +5401,7 @@ define @test_vloxseg6_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5436,7 +5436,7 @@ define @test_vloxseg6_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5471,7 +5471,7 @@ define @test_vloxseg6_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5506,7 +5506,7 @@ define @test_vloxseg7_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5542,7 +5542,7 @@ define 
@test_vloxseg7_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5578,7 +5578,7 @@ define @test_vloxseg7_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5614,7 +5614,7 @@ define @test_vloxseg8_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5651,7 +5651,7 @@ define @test_vloxseg8_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5688,7 +5688,7 @@ define @test_vloxseg8_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5725,7 +5725,7 @@ define @test_vloxseg2_nxv32i8_nxv32i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v16, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret @@ -5755,7 +5755,7 @@ define @test_vloxseg2_nxv32i8_nxv32i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -5785,7 +5785,7 @@ define @test_vloxseg2_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5815,7 +5815,7 @@ define @test_vloxseg2_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5845,7 +5845,7 @@ define @test_vloxseg2_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5875,7 +5875,7 @@ define @test_vloxseg3_nxv2i8_nxv2i32(i8* %base, 
%index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5907,7 +5907,7 @@ define @test_vloxseg3_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5939,7 +5939,7 @@ define @test_vloxseg3_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5971,7 +5971,7 @@ define @test_vloxseg4_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6004,7 +6004,7 @@ define @test_vloxseg4_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6037,7 +6037,7 @@ define @test_vloxseg4_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6070,7 +6070,7 @@ define @test_vloxseg5_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6104,7 +6104,7 @@ define @test_vloxseg5_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6138,7 +6138,7 @@ define @test_vloxseg5_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6172,7 +6172,7 @@ define @test_vloxseg6_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6207,7 +6207,7 @@ define @test_vloxseg6_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv2i8: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6242,7 +6242,7 @@ define @test_vloxseg6_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6277,7 +6277,7 @@ define @test_vloxseg7_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6313,7 +6313,7 @@ define @test_vloxseg7_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6349,7 +6349,7 @@ define @test_vloxseg7_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6385,7 +6385,7 @@ define @test_vloxseg8_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6422,7 +6422,7 @@ define @test_vloxseg8_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6459,7 +6459,7 @@ define @test_vloxseg8_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6496,7 +6496,7 @@ define @test_vloxseg2_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6526,7 +6526,7 @@ define @test_vloxseg2_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6556,7 +6556,7 @@ define @test_vloxseg2_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, 
ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6586,7 +6586,7 @@ define @test_vloxseg3_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6618,7 +6618,7 @@ define @test_vloxseg3_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6650,7 +6650,7 @@ define @test_vloxseg3_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6682,7 +6682,7 @@ define @test_vloxseg4_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6715,7 +6715,7 @@ define @test_vloxseg4_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6748,7 +6748,7 @@ define @test_vloxseg4_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6781,7 +6781,7 @@ define @test_vloxseg5_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6815,7 +6815,7 @@ define @test_vloxseg5_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6849,7 +6849,7 @@ define @test_vloxseg5_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6883,7 +6883,7 @@ define @test_vloxseg6_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: 
vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6918,7 +6918,7 @@ define @test_vloxseg6_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6953,7 +6953,7 @@ define @test_vloxseg6_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6988,7 +6988,7 @@ define @test_vloxseg7_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7024,7 +7024,7 @@ define @test_vloxseg7_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7060,7 +7060,7 @@ define @test_vloxseg7_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7096,7 +7096,7 @@ define @test_vloxseg8_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7133,7 +7133,7 @@ define @test_vloxseg8_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7170,7 +7170,7 @@ define @test_vloxseg8_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7207,7 +7207,7 @@ define @test_vloxseg2_nxv4i32_nxv4i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -7237,7 +7237,7 @@ define @test_vloxseg2_nxv4i32_nxv4i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, 
ta, ma ; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -7267,7 +7267,7 @@ define @test_vloxseg2_nxv4i32_nxv4i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -7297,7 +7297,7 @@ define @test_vloxseg3_nxv4i32_nxv4i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -7329,7 +7329,7 @@ define @test_vloxseg3_nxv4i32_nxv4i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -7361,7 +7361,7 @@ define @test_vloxseg3_nxv4i32_nxv4i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -7393,7 +7393,7 @@ define @test_vloxseg4_nxv4i32_nxv4i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -7426,7 +7426,7 @@ define @test_vloxseg4_nxv4i32_nxv4i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -7459,7 +7459,7 @@ define @test_vloxseg4_nxv4i32_nxv4i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -7492,7 +7492,7 @@ define @test_vloxseg2_nxv16f16_nxv16i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -7522,7 +7522,7 @@ define @test_vloxseg2_nxv16f16_nxv16i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -7552,7 +7552,7 @@ define @test_vloxseg2_nxv16f16_nxv16i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; 
CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret @@ -7582,7 +7582,7 @@ define @test_vloxseg2_nxv4f64_nxv4i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -7612,7 +7612,7 @@ define @test_vloxseg2_nxv4f64_nxv4i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -7642,7 +7642,7 @@ define @test_vloxseg2_nxv4f64_nxv4i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -7672,7 +7672,7 @@ define @test_vloxseg2_nxv1f64_nxv1i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7702,7 +7702,7 @@ define @test_vloxseg2_nxv1f64_nxv1i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7732,7 +7732,7 @@ define @test_vloxseg2_nxv1f64_nxv1i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7762,7 +7762,7 @@ define @test_vloxseg3_nxv1f64_nxv1i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7794,7 +7794,7 @@ define @test_vloxseg3_nxv1f64_nxv1i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7826,7 +7826,7 @@ define @test_vloxseg3_nxv1f64_nxv1i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7858,7 +7858,7 @@ define @test_vloxseg4_nxv1f64_nxv1i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: 
vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7891,7 +7891,7 @@ define @test_vloxseg4_nxv1f64_nxv1i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7924,7 +7924,7 @@ define @test_vloxseg4_nxv1f64_nxv1i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7957,7 +7957,7 @@ define @test_vloxseg5_nxv1f64_nxv1i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7991,7 +7991,7 @@ define @test_vloxseg5_nxv1f64_nxv1i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8025,7 +8025,7 @@ define @test_vloxseg5_nxv1f64_nxv1i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8059,7 +8059,7 @@ define @test_vloxseg6_nxv1f64_nxv1i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8094,7 +8094,7 @@ define @test_vloxseg6_nxv1f64_nxv1i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8129,7 +8129,7 @@ define @test_vloxseg6_nxv1f64_nxv1i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8164,7 +8164,7 @@ define @test_vloxseg7_nxv1f64_nxv1i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8200,7 +8200,7 @@ define @test_vloxseg7_nxv1f64_nxv1i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg7ei32.v 
v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8236,7 +8236,7 @@ define @test_vloxseg7_nxv1f64_nxv1i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8272,7 +8272,7 @@ define @test_vloxseg8_nxv1f64_nxv1i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8309,7 +8309,7 @@ define @test_vloxseg8_nxv1f64_nxv1i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8346,7 +8346,7 @@ define @test_vloxseg8_nxv1f64_nxv1i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8383,7 +8383,7 @@ define @test_vloxseg2_nxv2f32_nxv2i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8413,7 +8413,7 @@ define @test_vloxseg2_nxv2f32_nxv2i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8443,7 +8443,7 @@ define @test_vloxseg2_nxv2f32_nxv2i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8473,7 +8473,7 @@ define @test_vloxseg3_nxv2f32_nxv2i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8505,7 +8505,7 @@ define @test_vloxseg3_nxv2f32_nxv2i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8537,7 +8537,7 @@ define @test_vloxseg3_nxv2f32_nxv2i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; 
CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8569,7 +8569,7 @@ define @test_vloxseg4_nxv2f32_nxv2i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8602,7 +8602,7 @@ define @test_vloxseg4_nxv2f32_nxv2i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8635,7 +8635,7 @@ define @test_vloxseg4_nxv2f32_nxv2i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8668,7 +8668,7 @@ define @test_vloxseg5_nxv2f32_nxv2i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8702,7 +8702,7 @@ define @test_vloxseg5_nxv2f32_nxv2i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8736,7 +8736,7 @@ define @test_vloxseg5_nxv2f32_nxv2i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8770,7 +8770,7 @@ define @test_vloxseg6_nxv2f32_nxv2i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8805,7 +8805,7 @@ define @test_vloxseg6_nxv2f32_nxv2i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8840,7 +8840,7 @@ define @test_vloxseg6_nxv2f32_nxv2i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8875,7 +8875,7 @@ define @test_vloxseg7_nxv2f32_nxv2i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, 
v10 ; CHECK-NEXT: ret @@ -8911,7 +8911,7 @@ define @test_vloxseg7_nxv2f32_nxv2i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8947,7 +8947,7 @@ define @test_vloxseg7_nxv2f32_nxv2i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8983,7 +8983,7 @@ define @test_vloxseg8_nxv2f32_nxv2i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9020,7 +9020,7 @@ define @test_vloxseg8_nxv2f32_nxv2i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9057,7 +9057,7 @@ define @test_vloxseg8_nxv2f32_nxv2i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9094,7 +9094,7 @@ define @test_vloxseg2_nxv1f16_nxv1i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9124,7 +9124,7 @@ define @test_vloxseg2_nxv1f16_nxv1i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9154,7 +9154,7 @@ define @test_vloxseg2_nxv1f16_nxv1i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9184,7 +9184,7 @@ define @test_vloxseg3_nxv1f16_nxv1i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9216,7 +9216,7 @@ define @test_vloxseg3_nxv1f16_nxv1i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret 
@@ -9248,7 +9248,7 @@ define @test_vloxseg3_nxv1f16_nxv1i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9280,7 +9280,7 @@ define @test_vloxseg4_nxv1f16_nxv1i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9313,7 +9313,7 @@ define @test_vloxseg4_nxv1f16_nxv1i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9346,7 +9346,7 @@ define @test_vloxseg4_nxv1f16_nxv1i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9379,7 +9379,7 @@ define @test_vloxseg5_nxv1f16_nxv1i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9413,7 +9413,7 @@ define @test_vloxseg5_nxv1f16_nxv1i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9447,7 +9447,7 @@ define @test_vloxseg5_nxv1f16_nxv1i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9481,7 +9481,7 @@ define @test_vloxseg6_nxv1f16_nxv1i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9516,7 +9516,7 @@ define @test_vloxseg6_nxv1f16_nxv1i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9551,7 +9551,7 @@ define @test_vloxseg6_nxv1f16_nxv1i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9586,7 
+9586,7 @@ define @test_vloxseg7_nxv1f16_nxv1i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9622,7 +9622,7 @@ define @test_vloxseg7_nxv1f16_nxv1i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9658,7 +9658,7 @@ define @test_vloxseg7_nxv1f16_nxv1i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9694,7 +9694,7 @@ define @test_vloxseg8_nxv1f16_nxv1i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9731,7 +9731,7 @@ define @test_vloxseg8_nxv1f16_nxv1i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9768,7 +9768,7 @@ define @test_vloxseg8_nxv1f16_nxv1i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9805,7 +9805,7 @@ define @test_vloxseg2_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9835,7 +9835,7 @@ define @test_vloxseg2_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9865,7 +9865,7 @@ define @test_vloxseg2_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9895,7 +9895,7 @@ define @test_vloxseg3_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9927,7 +9927,7 @@ 
define @test_vloxseg3_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9959,7 +9959,7 @@ define @test_vloxseg3_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9991,7 +9991,7 @@ define @test_vloxseg4_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10024,7 +10024,7 @@ define @test_vloxseg4_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10057,7 +10057,7 @@ define @test_vloxseg4_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10090,7 +10090,7 @@ define @test_vloxseg5_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10124,7 +10124,7 @@ define @test_vloxseg5_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10158,7 +10158,7 @@ define @test_vloxseg5_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10192,7 +10192,7 @@ define @test_vloxseg6_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10227,7 +10227,7 @@ define @test_vloxseg6_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10262,7 
+10262,7 @@ define @test_vloxseg6_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10297,7 +10297,7 @@ define @test_vloxseg7_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10333,7 +10333,7 @@ define @test_vloxseg7_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10369,7 +10369,7 @@ define @test_vloxseg7_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10405,7 +10405,7 @@ define @test_vloxseg8_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10442,7 +10442,7 @@ define @test_vloxseg8_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10479,7 +10479,7 @@ define @test_vloxseg8_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10516,7 +10516,7 @@ define @test_vloxseg2_nxv8f16_nxv8i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -10546,7 +10546,7 @@ define @test_vloxseg2_nxv8f16_nxv8i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -10576,7 +10576,7 @@ define @test_vloxseg2_nxv8f16_nxv8i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret 
@@ -10606,7 +10606,7 @@ define @test_vloxseg3_nxv8f16_nxv8i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -10638,7 +10638,7 @@ define @test_vloxseg3_nxv8f16_nxv8i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -10670,7 +10670,7 @@ define @test_vloxseg3_nxv8f16_nxv8i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -10701,7 +10701,7 @@ define @test_vloxseg4_nxv8f16_nxv8i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -10734,7 +10734,7 @@ define @test_vloxseg4_nxv8f16_nxv8i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -10767,7 +10767,7 @@ define @test_vloxseg4_nxv8f16_nxv8i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -10800,7 +10800,7 @@ define @test_vloxseg2_nxv8f32_nxv8i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -10830,7 +10830,7 @@ define @test_vloxseg2_nxv8f32_nxv8i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -10860,7 +10860,7 @@ define @test_vloxseg2_nxv8f32_nxv8i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -10890,7 +10890,7 @@ define @test_vloxseg2_nxv2f64_nxv2i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: 
ret @@ -10920,7 +10920,7 @@ define @test_vloxseg2_nxv2f64_nxv2i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -10950,7 +10950,7 @@ define @test_vloxseg2_nxv2f64_nxv2i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -10980,7 +10980,7 @@ define @test_vloxseg3_nxv2f64_nxv2i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -11012,7 +11012,7 @@ define @test_vloxseg3_nxv2f64_nxv2i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -11044,7 +11044,7 @@ define @test_vloxseg3_nxv2f64_nxv2i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -11076,7 +11076,7 @@ define @test_vloxseg4_nxv2f64_nxv2i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -11109,7 +11109,7 @@ define @test_vloxseg4_nxv2f64_nxv2i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -11142,7 +11142,7 @@ define @test_vloxseg4_nxv2f64_nxv2i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -11175,7 +11175,7 @@ define @test_vloxseg2_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11205,7 +11205,7 @@ define @test_vloxseg2_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; 
CHECK-NEXT: ret @@ -11235,7 +11235,7 @@ define @test_vloxseg2_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -11265,7 +11265,7 @@ define @test_vloxseg3_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11297,7 +11297,7 @@ define @test_vloxseg3_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11329,7 +11329,7 @@ define @test_vloxseg3_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -11360,7 +11360,7 @@ define @test_vloxseg4_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11393,7 +11393,7 @@ define @test_vloxseg4_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11426,7 +11426,7 @@ define @test_vloxseg4_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -11459,7 +11459,7 @@ define @test_vloxseg5_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11493,7 +11493,7 @@ define @test_vloxseg5_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11527,7 +11527,7 @@ define @test_vloxseg5_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; 
CHECK-NEXT: ret @@ -11561,7 +11561,7 @@ define @test_vloxseg6_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11596,7 +11596,7 @@ define @test_vloxseg6_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11631,7 +11631,7 @@ define @test_vloxseg6_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -11666,7 +11666,7 @@ define @test_vloxseg7_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11702,7 +11702,7 @@ define @test_vloxseg7_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11738,7 +11738,7 @@ define @test_vloxseg7_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -11774,7 +11774,7 @@ define @test_vloxseg8_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11811,7 +11811,7 @@ define @test_vloxseg8_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11848,7 +11848,7 @@ define @test_vloxseg8_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -11885,7 +11885,7 @@ define @test_vloxseg2_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; 
CHECK-NEXT: ret @@ -11915,7 +11915,7 @@ define @test_vloxseg2_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11945,7 +11945,7 @@ define @test_vloxseg2_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11975,7 +11975,7 @@ define @test_vloxseg3_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12007,7 +12007,7 @@ define @test_vloxseg3_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12039,7 +12039,7 @@ define @test_vloxseg3_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12071,7 +12071,7 @@ define @test_vloxseg4_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12104,7 +12104,7 @@ define @test_vloxseg4_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12137,7 +12137,7 @@ define @test_vloxseg4_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12170,7 +12170,7 @@ define @test_vloxseg5_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12204,7 +12204,7 @@ define @test_vloxseg5_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, 
v10 ; CHECK-NEXT: ret @@ -12238,7 +12238,7 @@ define @test_vloxseg5_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12272,7 +12272,7 @@ define @test_vloxseg6_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12307,7 +12307,7 @@ define @test_vloxseg6_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12342,7 +12342,7 @@ define @test_vloxseg6_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12377,7 +12377,7 @@ define @test_vloxseg7_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12413,7 +12413,7 @@ define @test_vloxseg7_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12449,7 +12449,7 @@ define @test_vloxseg7_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12485,7 +12485,7 @@ define @test_vloxseg8_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12522,7 +12522,7 @@ define @test_vloxseg8_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12559,7 +12559,7 @@ define @test_vloxseg8_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: 
vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12596,7 +12596,7 @@ define @test_vloxseg2_nxv4f32_nxv4i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -12626,7 +12626,7 @@ define @test_vloxseg2_nxv4f32_nxv4i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -12656,7 +12656,7 @@ define @test_vloxseg2_nxv4f32_nxv4i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -12686,7 +12686,7 @@ define @test_vloxseg3_nxv4f32_nxv4i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -12718,7 +12718,7 @@ define @test_vloxseg3_nxv4f32_nxv4i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -12750,7 +12750,7 @@ define @test_vloxseg3_nxv4f32_nxv4i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -12782,7 +12782,7 @@ define @test_vloxseg4_nxv4f32_nxv4i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -12815,7 +12815,7 @@ define @test_vloxseg4_nxv4f32_nxv4i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -12848,7 +12848,7 @@ define @test_vloxseg4_nxv4f32_nxv4i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll @@ -8,7 +8,7 @@ define @test_vloxseg2_nxv16i16_nxv16i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg2_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -38,7 +38,7 @@ define @test_vloxseg2_nxv16i16_nxv16i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -68,7 +68,7 @@ define @test_vloxseg2_nxv16i16_nxv16i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret @@ -98,7 +98,7 @@ define @test_vloxseg2_nxv4i32_nxv4i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -128,7 +128,7 @@ define @test_vloxseg2_nxv4i32_nxv4i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -158,7 +158,7 @@ define @test_vloxseg2_nxv4i32_nxv4i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -188,7 +188,7 @@ define @test_vloxseg2_nxv4i32_nxv4i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -218,7 +218,7 @@ define @test_vloxseg3_nxv4i32_nxv4i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -250,7 +250,7 @@ define @test_vloxseg3_nxv4i32_nxv4i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -282,7 +282,7 @@ define @test_vloxseg3_nxv4i32_nxv4i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg3ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -313,7 +313,7 @@ define @test_vloxseg3_nxv4i32_nxv4i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i32_nxv4i16: ; CHECK: # %bb.0: 
# %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -345,7 +345,7 @@ define @test_vloxseg4_nxv4i32_nxv4i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -378,7 +378,7 @@ define @test_vloxseg4_nxv4i32_nxv4i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -411,7 +411,7 @@ define @test_vloxseg4_nxv4i32_nxv4i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -444,7 +444,7 @@ define @test_vloxseg4_nxv4i32_nxv4i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -477,7 +477,7 @@ define @test_vloxseg2_nxv16i8_nxv16i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -507,7 +507,7 @@ define @test_vloxseg2_nxv16i8_nxv16i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -537,7 +537,7 @@ define @test_vloxseg2_nxv16i8_nxv16i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret @@ -567,7 +567,7 @@ define @test_vloxseg3_nxv16i8_nxv16i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -598,7 +598,7 @@ define @test_vloxseg3_nxv16i8_nxv16i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -630,7 +630,7 @@ define @test_vloxseg3_nxv16i8_nxv16i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu 
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v16, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret @@ -661,7 +661,7 @@ define @test_vloxseg4_nxv16i8_nxv16i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -694,7 +694,7 @@ define @test_vloxseg4_nxv16i8_nxv16i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -727,7 +727,7 @@ define @test_vloxseg4_nxv16i8_nxv16i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret @@ -759,7 +759,7 @@ define @test_vloxseg2_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg2ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -789,7 +789,7 @@ define @test_vloxseg2_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -819,7 +819,7 @@ define @test_vloxseg2_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -849,7 +849,7 @@ define @test_vloxseg2_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -879,7 +879,7 @@ define @test_vloxseg3_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg3ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -911,7 +911,7 @@ define @test_vloxseg3_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -943,7 +943,7 @@ define @test_vloxseg3_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; 
CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -975,7 +975,7 @@ define @test_vloxseg3_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1007,7 +1007,7 @@ define @test_vloxseg4_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg4ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1040,7 +1040,7 @@ define @test_vloxseg4_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1073,7 +1073,7 @@ define @test_vloxseg4_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1106,7 +1106,7 @@ define @test_vloxseg4_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1139,7 +1139,7 @@ define @test_vloxseg5_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg5ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1173,7 +1173,7 @@ define @test_vloxseg5_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1207,7 +1207,7 @@ define @test_vloxseg5_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1241,7 +1241,7 @@ define @test_vloxseg5_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1275,7 +1275,7 @@ define @test_vloxseg6_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg6ei64.v v9, (a0), v8 ; 
CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1310,7 +1310,7 @@ define @test_vloxseg6_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1345,7 +1345,7 @@ define @test_vloxseg6_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1380,7 +1380,7 @@ define @test_vloxseg6_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1415,7 +1415,7 @@ define @test_vloxseg7_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg7ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1451,7 +1451,7 @@ define @test_vloxseg7_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1487,7 +1487,7 @@ define @test_vloxseg7_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1523,7 +1523,7 @@ define @test_vloxseg7_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1559,7 +1559,7 @@ define @test_vloxseg8_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1596,7 +1596,7 @@ define @test_vloxseg8_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1633,7 +1633,7 @@ define @test_vloxseg8_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; 
CHECK-NEXT: ret @@ -1670,7 +1670,7 @@ define @test_vloxseg8_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1707,7 +1707,7 @@ define @test_vloxseg2_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg2ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1737,7 +1737,7 @@ define @test_vloxseg2_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1767,7 +1767,7 @@ define @test_vloxseg2_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1797,7 +1797,7 @@ define @test_vloxseg2_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1827,7 +1827,7 @@ define @test_vloxseg3_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg3ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1859,7 +1859,7 @@ define @test_vloxseg3_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1891,7 +1891,7 @@ define @test_vloxseg3_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1923,7 +1923,7 @@ define @test_vloxseg3_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1955,7 +1955,7 @@ define @test_vloxseg4_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg4ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ 
-1988,7 +1988,7 @@ define @test_vloxseg4_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2021,7 +2021,7 @@ define @test_vloxseg4_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2054,7 +2054,7 @@ define @test_vloxseg4_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2087,7 +2087,7 @@ define @test_vloxseg5_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg5ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2121,7 +2121,7 @@ define @test_vloxseg5_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2155,7 +2155,7 @@ define @test_vloxseg5_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2189,7 +2189,7 @@ define @test_vloxseg5_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2223,7 +2223,7 @@ define @test_vloxseg6_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg6ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2258,7 +2258,7 @@ define @test_vloxseg6_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2293,7 +2293,7 @@ define @test_vloxseg6_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2328,7 +2328,7 @@ 
define @test_vloxseg6_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2363,7 +2363,7 @@ define @test_vloxseg7_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg7ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2399,7 +2399,7 @@ define @test_vloxseg7_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2435,7 +2435,7 @@ define @test_vloxseg7_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2471,7 +2471,7 @@ define @test_vloxseg7_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2507,7 +2507,7 @@ define @test_vloxseg8_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2544,7 +2544,7 @@ define @test_vloxseg8_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2581,7 +2581,7 @@ define @test_vloxseg8_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2618,7 +2618,7 @@ define @test_vloxseg8_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2655,7 +2655,7 @@ define @test_vloxseg2_nxv8i16_nxv8i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -2685,7 +2685,7 @@ define 
@test_vloxseg2_nxv8i16_nxv8i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -2715,7 +2715,7 @@ define @test_vloxseg2_nxv8i16_nxv8i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret @@ -2745,7 +2745,7 @@ define @test_vloxseg2_nxv8i16_nxv8i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -2775,7 +2775,7 @@ define @test_vloxseg3_nxv8i16_nxv8i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -2807,7 +2807,7 @@ define @test_vloxseg3_nxv8i16_nxv8i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -2839,7 +2839,7 @@ define @test_vloxseg3_nxv8i16_nxv8i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg3ei64.v v16, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret @@ -2870,7 +2870,7 @@ define @test_vloxseg3_nxv8i16_nxv8i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -2901,7 +2901,7 @@ define @test_vloxseg4_nxv8i16_nxv8i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -2934,7 +2934,7 @@ define @test_vloxseg4_nxv8i16_nxv8i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -2967,7 +2967,7 @@ define @test_vloxseg4_nxv8i16_nxv8i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret @@ -2999,7 +2999,7 @@ define 
@test_vloxseg4_nxv8i16_nxv8i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -3032,7 +3032,7 @@ define @test_vloxseg2_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -3062,7 +3062,7 @@ define @test_vloxseg2_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3092,7 +3092,7 @@ define @test_vloxseg2_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -3122,7 +3122,7 @@ define @test_vloxseg2_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3152,7 +3152,7 @@ define @test_vloxseg3_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -3183,7 +3183,7 @@ define @test_vloxseg3_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3215,7 +3215,7 @@ define @test_vloxseg3_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg3ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -3246,7 +3246,7 @@ define @test_vloxseg3_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3278,7 +3278,7 @@ define @test_vloxseg4_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -3311,7 +3311,7 @@ define @test_vloxseg4_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { ; 
CHECK-LABEL: test_vloxseg4_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3344,7 +3344,7 @@ define @test_vloxseg4_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -3376,7 +3376,7 @@ define @test_vloxseg4_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3409,7 +3409,7 @@ define @test_vloxseg5_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -3443,7 +3443,7 @@ define @test_vloxseg5_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3477,7 +3477,7 @@ define @test_vloxseg5_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg5ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -3510,7 +3510,7 @@ define @test_vloxseg5_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3544,7 +3544,7 @@ define @test_vloxseg6_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -3579,7 +3579,7 @@ define @test_vloxseg6_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3614,7 +3614,7 @@ define @test_vloxseg6_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg6ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -3649,7 +3649,7 @@ define @test_vloxseg6_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3684,7 +3684,7 @@ define @test_vloxseg7_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -3720,7 +3720,7 @@ define @test_vloxseg7_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3756,7 +3756,7 @@ define @test_vloxseg7_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg7ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -3792,7 +3792,7 @@ define @test_vloxseg7_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3828,7 +3828,7 @@ define @test_vloxseg8_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -3865,7 +3865,7 @@ define @test_vloxseg8_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3902,7 +3902,7 @@ define @test_vloxseg8_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -3939,7 +3939,7 @@ define @test_vloxseg8_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3976,7 +3976,7 @@ define @test_vloxseg2_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg2ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4006,7 +4006,7 @@ define @test_vloxseg2_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu 
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8
 ; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
@@ -4036,7 +4036,7 @@
 define @test_vloxseg2_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8
 ; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
@@ -4066,7 +4066,7 @@
 define @test_vloxseg2_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vloxseg2_nxv1i16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8
 ; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
@@ -4096,7 +4096,7 @@
 define @test_vloxseg3_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT: vloxseg3ei64.v v9, (a0), v8
 ; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
@@ -4128,7 +4128,7 @@
 define @test_vloxseg3_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8
 ; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
@@ -4160,7 +4160,7 @@
 define @test_vloxseg3_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8
 ; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
@@ -4192,7 +4192,7 @@
 define @test_vloxseg3_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vloxseg3_nxv1i16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8
 ; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
@@ -4224,7 +4224,7 @@
 define @test_vloxseg4_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT: vloxseg4ei64.v v9, (a0), v8
 ; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
@@ -4257,7 +4257,7 @@
 define @test_vloxseg4_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8
 ; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
@@ -4290,7 +4290,7 @@
 define @test_vloxseg4_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8
 ; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
@@ -4323,7 +4323,7 @@
 define @test_vloxseg4_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vloxseg4_nxv1i16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8
 ; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
@@ -4356,7 +4356,7 @@
 define @test_vloxseg5_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT: vloxseg5ei64.v v9, (a0), v8
 ; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
@@ -4390,7 +4390,7 @@
 define @test_vloxseg5_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8
 ; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
@@ -4424,7 +4424,7 @@
 define @test_vloxseg5_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8
 ; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
@@ -4458,7 +4458,7 @@
 define @test_vloxseg5_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vloxseg5_nxv1i16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8
 ; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
@@ -4492,7 +4492,7 @@
 define @test_vloxseg6_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT: vloxseg6ei64.v v9, (a0), v8
 ; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
@@ -4527,7 +4527,7 @@
 define @test_vloxseg6_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8
 ; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
@@ -4562,7 +4562,7 @@
 define @test_vloxseg6_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv1i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8
 ; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
@@ -4597,7 +4597,7 @@
 define @test_vloxseg6_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vloxseg6_nxv1i16_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8
 ; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
@@ -4632,7 +4632,7 @@
 define @test_vloxseg7_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT: vloxseg7ei64.v v9, (a0), v8
 ; CHECK-NEXT: vmv1r.v v8, v10
 ; CHECK-NEXT: ret
@@ -4668,7 +4668,7 @@
 define @test_vloxseg7_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) {
 ; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv1i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4,
ta, ma ; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4704,7 +4704,7 @@ define @test_vloxseg7_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4740,7 +4740,7 @@ define @test_vloxseg7_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4776,7 +4776,7 @@ define @test_vloxseg8_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4813,7 +4813,7 @@ define @test_vloxseg8_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4850,7 +4850,7 @@ define @test_vloxseg8_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4887,7 +4887,7 @@ define @test_vloxseg8_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4924,7 +4924,7 @@ define @test_vloxseg2_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4954,7 +4954,7 @@ define @test_vloxseg2_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4984,7 +4984,7 @@ define @test_vloxseg2_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5014,7 +5014,7 @@ define @test_vloxseg2_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: 
vloxseg2ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -5044,7 +5044,7 @@ define @test_vloxseg3_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5076,7 +5076,7 @@ define @test_vloxseg3_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5108,7 +5108,7 @@ define @test_vloxseg3_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5140,7 +5140,7 @@ define @test_vloxseg3_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -5171,7 +5171,7 @@ define @test_vloxseg4_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5204,7 +5204,7 @@ define @test_vloxseg4_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5237,7 +5237,7 @@ define @test_vloxseg4_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5270,7 +5270,7 @@ define @test_vloxseg4_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -5303,7 +5303,7 @@ define @test_vloxseg5_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5337,7 +5337,7 @@ define @test_vloxseg5_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; 
CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5371,7 +5371,7 @@ define @test_vloxseg5_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5405,7 +5405,7 @@ define @test_vloxseg5_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -5439,7 +5439,7 @@ define @test_vloxseg6_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5474,7 +5474,7 @@ define @test_vloxseg6_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5509,7 +5509,7 @@ define @test_vloxseg6_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5544,7 +5544,7 @@ define @test_vloxseg6_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -5579,7 +5579,7 @@ define @test_vloxseg7_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5615,7 +5615,7 @@ define @test_vloxseg7_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5651,7 +5651,7 @@ define @test_vloxseg7_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5687,7 +5687,7 @@ define @test_vloxseg7_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; 
CHECK-NEXT: ret @@ -5723,7 +5723,7 @@ define @test_vloxseg8_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5760,7 +5760,7 @@ define @test_vloxseg8_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5797,7 +5797,7 @@ define @test_vloxseg8_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5834,7 +5834,7 @@ define @test_vloxseg8_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -5871,7 +5871,7 @@ define @test_vloxseg2_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -5901,7 +5901,7 @@ define @test_vloxseg2_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5931,7 +5931,7 @@ define @test_vloxseg2_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret @@ -5961,7 +5961,7 @@ define @test_vloxseg2_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -5991,7 +5991,7 @@ define @test_vloxseg3_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -6022,7 +6022,7 @@ define @test_vloxseg3_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6054,7 +6054,7 @@ define 
@test_vloxseg3_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg3ei64.v v16, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret @@ -6085,7 +6085,7 @@ define @test_vloxseg3_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -6116,7 +6116,7 @@ define @test_vloxseg4_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -6149,7 +6149,7 @@ define @test_vloxseg4_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6182,7 +6182,7 @@ define @test_vloxseg4_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret @@ -6214,7 +6214,7 @@ define @test_vloxseg4_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -6246,7 +6246,7 @@ define @test_vloxseg5_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -6280,7 +6280,7 @@ define @test_vloxseg5_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6314,7 +6314,7 @@ define @test_vloxseg5_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg5ei64.v v16, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret @@ -6347,7 +6347,7 @@ define @test_vloxseg5_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg5ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -6380,7 +6380,7 @@ define @test_vloxseg6_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vloxseg6_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -6415,7 +6415,7 @@ define @test_vloxseg6_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6450,7 +6450,7 @@ define @test_vloxseg6_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg6ei64.v v16, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret @@ -6484,7 +6484,7 @@ define @test_vloxseg6_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg6ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -6519,7 +6519,7 @@ define @test_vloxseg7_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -6555,7 +6555,7 @@ define @test_vloxseg7_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6591,7 +6591,7 @@ define @test_vloxseg7_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg7ei64.v v16, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret @@ -6626,7 +6626,7 @@ define @test_vloxseg7_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg7ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -6662,7 +6662,7 @@ define @test_vloxseg8_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -6699,7 +6699,7 @@ define @test_vloxseg8_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6736,7 +6736,7 @@ define @test_vloxseg8_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret @@ -6772,7 +6772,7 @@ define @test_vloxseg8_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -6809,7 +6809,7 @@ define @test_vloxseg2_nxv4i64_nxv4i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -6839,7 +6839,7 @@ define @test_vloxseg2_nxv4i64_nxv4i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -6869,7 +6869,7 @@ define @test_vloxseg2_nxv4i64_nxv4i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -6899,7 +6899,7 @@ define @test_vloxseg2_nxv4i64_nxv4i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -6929,7 +6929,7 @@ define @test_vloxseg2_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -6959,7 +6959,7 @@ define @test_vloxseg2_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6989,7 +6989,7 @@ define @test_vloxseg2_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -7019,7 +7019,7 @@ define @test_vloxseg2_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7049,7 +7049,7 @@ define @test_vloxseg3_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: 
vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -7080,7 +7080,7 @@ define @test_vloxseg3_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7112,7 +7112,7 @@ define @test_vloxseg3_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg3ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -7143,7 +7143,7 @@ define @test_vloxseg3_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7175,7 +7175,7 @@ define @test_vloxseg4_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -7208,7 +7208,7 @@ define @test_vloxseg4_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7241,7 +7241,7 @@ define @test_vloxseg4_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -7273,7 +7273,7 @@ define @test_vloxseg4_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7306,7 +7306,7 @@ define @test_vloxseg5_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -7340,7 +7340,7 @@ define @test_vloxseg5_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7374,7 +7374,7 @@ define @test_vloxseg5_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; 
CHECK-NEXT: vloxseg5ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -7407,7 +7407,7 @@ define @test_vloxseg5_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7441,7 +7441,7 @@ define @test_vloxseg6_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -7476,7 +7476,7 @@ define @test_vloxseg6_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7511,7 +7511,7 @@ define @test_vloxseg6_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg6ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -7546,7 +7546,7 @@ define @test_vloxseg6_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7581,7 +7581,7 @@ define @test_vloxseg7_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -7617,7 +7617,7 @@ define @test_vloxseg7_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7653,7 +7653,7 @@ define @test_vloxseg7_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg7ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -7689,7 +7689,7 @@ define @test_vloxseg7_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7725,7 +7725,7 @@ define @test_vloxseg8_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg8ei32.v v10, 
(a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -7762,7 +7762,7 @@ define @test_vloxseg8_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7799,7 +7799,7 @@ define @test_vloxseg8_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -7836,7 +7836,7 @@ define @test_vloxseg8_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7873,7 +7873,7 @@ define @test_vloxseg2_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg2ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7903,7 +7903,7 @@ define @test_vloxseg2_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7933,7 +7933,7 @@ define @test_vloxseg2_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7963,7 +7963,7 @@ define @test_vloxseg2_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7993,7 +7993,7 @@ define @test_vloxseg3_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg3ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8025,7 +8025,7 @@ define @test_vloxseg3_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8057,7 +8057,7 @@ define @test_vloxseg3_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ 
-8089,7 +8089,7 @@ define @test_vloxseg3_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8121,7 +8121,7 @@ define @test_vloxseg4_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg4ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8154,7 +8154,7 @@ define @test_vloxseg4_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8187,7 +8187,7 @@ define @test_vloxseg4_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8220,7 +8220,7 @@ define @test_vloxseg4_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8253,7 +8253,7 @@ define @test_vloxseg5_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg5ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8287,7 +8287,7 @@ define @test_vloxseg5_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8321,7 +8321,7 @@ define @test_vloxseg5_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8355,7 +8355,7 @@ define @test_vloxseg5_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8389,7 +8389,7 @@ define @test_vloxseg6_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg6ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8424,7 +8424,7 @@ define @test_vloxseg6_nxv1i8_nxv1i32(i8* %base, 
%index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8459,7 +8459,7 @@ define @test_vloxseg6_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8494,7 +8494,7 @@ define @test_vloxseg6_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8529,7 +8529,7 @@ define @test_vloxseg7_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg7ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8565,7 +8565,7 @@ define @test_vloxseg7_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8601,7 +8601,7 @@ define @test_vloxseg7_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8637,7 +8637,7 @@ define @test_vloxseg7_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8673,7 +8673,7 @@ define @test_vloxseg8_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8710,7 +8710,7 @@ define @test_vloxseg8_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8747,7 +8747,7 @@ define @test_vloxseg8_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8784,7 +8784,7 @@ define @test_vloxseg8_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1i8_nxv1i8: ; CHECK: 
# %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8821,7 +8821,7 @@ define @test_vloxseg2_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8851,7 +8851,7 @@ define @test_vloxseg2_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8881,7 +8881,7 @@ define @test_vloxseg2_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8911,7 +8911,7 @@ define @test_vloxseg2_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg2ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -8941,7 +8941,7 @@ define @test_vloxseg3_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8973,7 +8973,7 @@ define @test_vloxseg3_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9005,7 +9005,7 @@ define @test_vloxseg3_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9037,7 +9037,7 @@ define @test_vloxseg3_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -9068,7 +9068,7 @@ define @test_vloxseg4_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9101,7 +9101,7 @@ define @test_vloxseg4_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; 
CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9134,7 +9134,7 @@ define @test_vloxseg4_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9167,7 +9167,7 @@ define @test_vloxseg4_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -9200,7 +9200,7 @@ define @test_vloxseg5_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9234,7 +9234,7 @@ define @test_vloxseg5_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9268,7 +9268,7 @@ define @test_vloxseg5_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9302,7 +9302,7 @@ define @test_vloxseg5_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -9336,7 +9336,7 @@ define @test_vloxseg6_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9371,7 +9371,7 @@ define @test_vloxseg6_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9406,7 +9406,7 @@ define @test_vloxseg6_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9441,7 +9441,7 @@ define @test_vloxseg6_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: 
vloxseg6ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -9476,7 +9476,7 @@ define @test_vloxseg7_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9512,7 +9512,7 @@ define @test_vloxseg7_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9548,7 +9548,7 @@ define @test_vloxseg7_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9584,7 +9584,7 @@ define @test_vloxseg7_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -9620,7 +9620,7 @@ define @test_vloxseg8_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9657,7 +9657,7 @@ define @test_vloxseg8_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9694,7 +9694,7 @@ define @test_vloxseg8_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9731,7 +9731,7 @@ define @test_vloxseg8_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -9768,7 +9768,7 @@ define @test_vloxseg2_nxv8i32_nxv8i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -9798,7 +9798,7 @@ define @test_vloxseg2_nxv8i32_nxv8i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; 
CHECK-NEXT: ret @@ -9828,7 +9828,7 @@ define @test_vloxseg2_nxv8i32_nxv8i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret @@ -9858,7 +9858,7 @@ define @test_vloxseg2_nxv8i32_nxv8i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -9888,7 +9888,7 @@ define @test_vloxseg2_nxv32i8_nxv32i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v16, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret @@ -9918,7 +9918,7 @@ define @test_vloxseg2_nxv32i8_nxv32i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -9948,7 +9948,7 @@ define @test_vloxseg2_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9978,7 +9978,7 @@ define @test_vloxseg2_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10008,7 +10008,7 @@ define @test_vloxseg2_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10038,7 +10038,7 @@ define @test_vloxseg2_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg2ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -10068,7 +10068,7 @@ define @test_vloxseg3_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10100,7 +10100,7 @@ define @test_vloxseg3_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ 
-10132,7 +10132,7 @@ define @test_vloxseg3_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10164,7 +10164,7 @@ define @test_vloxseg3_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -10195,7 +10195,7 @@ define @test_vloxseg4_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10228,7 +10228,7 @@ define @test_vloxseg4_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10261,7 +10261,7 @@ define @test_vloxseg4_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10294,7 +10294,7 @@ define @test_vloxseg4_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -10327,7 +10327,7 @@ define @test_vloxseg5_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10361,7 +10361,7 @@ define @test_vloxseg5_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10395,7 +10395,7 @@ define @test_vloxseg5_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10429,7 +10429,7 @@ define @test_vloxseg5_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret 
@@ -10463,7 +10463,7 @@ define @test_vloxseg6_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10498,7 +10498,7 @@ define @test_vloxseg6_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10533,7 +10533,7 @@ define @test_vloxseg6_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10568,7 +10568,7 @@ define @test_vloxseg6_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -10603,7 +10603,7 @@ define @test_vloxseg7_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10639,7 +10639,7 @@ define @test_vloxseg7_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10675,7 +10675,7 @@ define @test_vloxseg7_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10711,7 +10711,7 @@ define @test_vloxseg7_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -10747,7 +10747,7 @@ define @test_vloxseg8_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10784,7 +10784,7 @@ define @test_vloxseg8_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret 
@@ -10821,7 +10821,7 @@ define @test_vloxseg8_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10858,7 +10858,7 @@ define @test_vloxseg8_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -10895,7 +10895,7 @@ define @test_vloxseg2_nxv2i64_nxv2i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -10925,7 +10925,7 @@ define @test_vloxseg2_nxv2i64_nxv2i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -10955,7 +10955,7 @@ define @test_vloxseg2_nxv2i64_nxv2i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -10985,7 +10985,7 @@ define @test_vloxseg2_nxv2i64_nxv2i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxseg2ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -11015,7 +11015,7 @@ define @test_vloxseg3_nxv2i64_nxv2i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -11047,7 +11047,7 @@ define @test_vloxseg3_nxv2i64_nxv2i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -11079,7 +11079,7 @@ define @test_vloxseg3_nxv2i64_nxv2i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -11111,7 +11111,7 @@ define @test_vloxseg3_nxv2i64_nxv2i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ 
-11143,7 +11143,7 @@ define @test_vloxseg4_nxv2i64_nxv2i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -11176,7 +11176,7 @@ define @test_vloxseg4_nxv2i64_nxv2i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -11209,7 +11209,7 @@ define @test_vloxseg4_nxv2i64_nxv2i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -11242,7 +11242,7 @@ define @test_vloxseg4_nxv2i64_nxv2i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -11275,7 +11275,7 @@ define @test_vloxseg2_nxv16f16_nxv16i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -11305,7 +11305,7 @@ define @test_vloxseg2_nxv16f16_nxv16i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -11335,7 +11335,7 @@ define @test_vloxseg2_nxv16f16_nxv16i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret @@ -11365,7 +11365,7 @@ define @test_vloxseg2_nxv4f64_nxv4i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -11395,7 +11395,7 @@ define @test_vloxseg2_nxv4f64_nxv4i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -11425,7 +11425,7 @@ define @test_vloxseg2_nxv4f64_nxv4i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; 
CHECK-NEXT: ret @@ -11455,7 +11455,7 @@ define @test_vloxseg2_nxv4f64_nxv4i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -11485,7 +11485,7 @@ define @test_vloxseg2_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg2ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11515,7 +11515,7 @@ define @test_vloxseg2_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11545,7 +11545,7 @@ define @test_vloxseg2_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11575,7 +11575,7 @@ define @test_vloxseg2_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11605,7 +11605,7 @@ define @test_vloxseg3_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg3ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11637,7 +11637,7 @@ define @test_vloxseg3_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11669,7 +11669,7 @@ define @test_vloxseg3_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11701,7 +11701,7 @@ define @test_vloxseg3_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11733,7 +11733,7 @@ define @test_vloxseg4_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg4ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v 
v8, v10 ; CHECK-NEXT: ret @@ -11766,7 +11766,7 @@ define @test_vloxseg4_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11799,7 +11799,7 @@ define @test_vloxseg4_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11832,7 +11832,7 @@ define @test_vloxseg4_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11865,7 +11865,7 @@ define @test_vloxseg5_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg5ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11899,7 +11899,7 @@ define @test_vloxseg5_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11933,7 +11933,7 @@ define @test_vloxseg5_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11967,7 +11967,7 @@ define @test_vloxseg5_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12001,7 +12001,7 @@ define @test_vloxseg6_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg6ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12036,7 +12036,7 @@ define @test_vloxseg6_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12071,7 +12071,7 @@ define @test_vloxseg6_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; 
CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12106,7 +12106,7 @@ define @test_vloxseg6_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12141,7 +12141,7 @@ define @test_vloxseg7_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg7ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12177,7 +12177,7 @@ define @test_vloxseg7_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12213,7 +12213,7 @@ define @test_vloxseg7_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12249,7 +12249,7 @@ define @test_vloxseg7_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12285,7 +12285,7 @@ define @test_vloxseg8_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12322,7 +12322,7 @@ define @test_vloxseg8_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12359,7 +12359,7 @@ define @test_vloxseg8_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12396,7 +12396,7 @@ define @test_vloxseg8_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12433,7 +12433,7 @@ define @test_vloxseg2_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v9, (a0), 
v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12463,7 +12463,7 @@ define @test_vloxseg2_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12493,7 +12493,7 @@ define @test_vloxseg2_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12523,7 +12523,7 @@ define @test_vloxseg2_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg2ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -12553,7 +12553,7 @@ define @test_vloxseg3_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12585,7 +12585,7 @@ define @test_vloxseg3_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12617,7 +12617,7 @@ define @test_vloxseg3_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12649,7 +12649,7 @@ define @test_vloxseg3_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -12680,7 +12680,7 @@ define @test_vloxseg4_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12713,7 +12713,7 @@ define @test_vloxseg4_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12746,7 +12746,7 @@ define @test_vloxseg4_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v9, (a0), 
v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12779,7 +12779,7 @@ define @test_vloxseg4_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -12812,7 +12812,7 @@ define @test_vloxseg5_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12846,7 +12846,7 @@ define @test_vloxseg5_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12880,7 +12880,7 @@ define @test_vloxseg5_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12914,7 +12914,7 @@ define @test_vloxseg5_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -12948,7 +12948,7 @@ define @test_vloxseg6_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12983,7 +12983,7 @@ define @test_vloxseg6_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13018,7 +13018,7 @@ define @test_vloxseg6_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13053,7 +13053,7 @@ define @test_vloxseg6_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -13088,7 +13088,7 @@ define @test_vloxseg7_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg7ei32.v v9, 
(a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13124,7 +13124,7 @@ define @test_vloxseg7_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13160,7 +13160,7 @@ define @test_vloxseg7_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13196,7 +13196,7 @@ define @test_vloxseg7_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -13232,7 +13232,7 @@ define @test_vloxseg8_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13269,7 +13269,7 @@ define @test_vloxseg8_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13306,7 +13306,7 @@ define @test_vloxseg8_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13343,7 +13343,7 @@ define @test_vloxseg8_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -13380,7 +13380,7 @@ define @test_vloxseg2_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg2ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13410,7 +13410,7 @@ define @test_vloxseg2_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13440,7 +13440,7 @@ define @test_vloxseg2_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg2ei16.v 
v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13470,7 +13470,7 @@ define @test_vloxseg2_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13500,7 +13500,7 @@ define @test_vloxseg3_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg3ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13532,7 +13532,7 @@ define @test_vloxseg3_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13564,7 +13564,7 @@ define @test_vloxseg3_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13596,7 +13596,7 @@ define @test_vloxseg3_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13628,7 +13628,7 @@ define @test_vloxseg4_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg4ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13661,7 +13661,7 @@ define @test_vloxseg4_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13694,7 +13694,7 @@ define @test_vloxseg4_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13727,7 +13727,7 @@ define @test_vloxseg4_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13760,7 +13760,7 @@ define @test_vloxseg5_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: 
vloxseg5ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13794,7 +13794,7 @@ define @test_vloxseg5_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13828,7 +13828,7 @@ define @test_vloxseg5_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13862,7 +13862,7 @@ define @test_vloxseg5_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13896,7 +13896,7 @@ define @test_vloxseg6_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg6ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13931,7 +13931,7 @@ define @test_vloxseg6_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13966,7 +13966,7 @@ define @test_vloxseg6_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14001,7 +14001,7 @@ define @test_vloxseg6_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14036,7 +14036,7 @@ define @test_vloxseg7_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg7ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14072,7 +14072,7 @@ define @test_vloxseg7_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14108,7 +14108,7 @@ define @test_vloxseg7_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; 
CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14144,7 +14144,7 @@ define @test_vloxseg7_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14180,7 +14180,7 @@ define @test_vloxseg8_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14217,7 +14217,7 @@ define @test_vloxseg8_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14254,7 +14254,7 @@ define @test_vloxseg8_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14291,7 +14291,7 @@ define @test_vloxseg8_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14328,7 +14328,7 @@ define @test_vloxseg2_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg2ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14358,7 +14358,7 @@ define @test_vloxseg2_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14388,7 +14388,7 @@ define @test_vloxseg2_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14418,7 +14418,7 @@ define @test_vloxseg2_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14448,7 +14448,7 @@ define @test_vloxseg3_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, 
mf2, ta, ma ; CHECK-NEXT: vloxseg3ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14480,7 +14480,7 @@ define @test_vloxseg3_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14512,7 +14512,7 @@ define @test_vloxseg3_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14544,7 +14544,7 @@ define @test_vloxseg3_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14576,7 +14576,7 @@ define @test_vloxseg4_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg4ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14609,7 +14609,7 @@ define @test_vloxseg4_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14642,7 +14642,7 @@ define @test_vloxseg4_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14675,7 +14675,7 @@ define @test_vloxseg4_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14708,7 +14708,7 @@ define @test_vloxseg5_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg5ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14742,7 +14742,7 @@ define @test_vloxseg5_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14776,7 +14776,7 @@ define @test_vloxseg5_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: 
vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14810,7 +14810,7 @@ define @test_vloxseg5_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14844,7 +14844,7 @@ define @test_vloxseg6_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg6ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14879,7 +14879,7 @@ define @test_vloxseg6_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14914,7 +14914,7 @@ define @test_vloxseg6_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14949,7 +14949,7 @@ define @test_vloxseg6_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14984,7 +14984,7 @@ define @test_vloxseg7_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg7ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -15020,7 +15020,7 @@ define @test_vloxseg7_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -15056,7 +15056,7 @@ define @test_vloxseg7_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -15092,7 +15092,7 @@ define @test_vloxseg7_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -15128,7 +15128,7 @@ define @test_vloxseg8_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, 
mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg8ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -15165,7 +15165,7 @@ define @test_vloxseg8_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -15202,7 +15202,7 @@ define @test_vloxseg8_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -15239,7 +15239,7 @@ define @test_vloxseg8_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -15276,7 +15276,7 @@ define @test_vloxseg2_nxv8f16_nxv8i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -15306,7 +15306,7 @@ define @test_vloxseg2_nxv8f16_nxv8i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -15336,7 +15336,7 @@ define @test_vloxseg2_nxv8f16_nxv8i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret @@ -15366,7 +15366,7 @@ define @test_vloxseg2_nxv8f16_nxv8i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -15396,7 +15396,7 @@ define @test_vloxseg3_nxv8f16_nxv8i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -15428,7 +15428,7 @@ define @test_vloxseg3_nxv8f16_nxv8i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -15460,7 +15460,7 @@ define @test_vloxseg3_nxv8f16_nxv8i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, 
ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg3ei64.v v16, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret @@ -15491,7 +15491,7 @@ define @test_vloxseg3_nxv8f16_nxv8i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -15522,7 +15522,7 @@ define @test_vloxseg4_nxv8f16_nxv8i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -15555,7 +15555,7 @@ define @test_vloxseg4_nxv8f16_nxv8i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -15588,7 +15588,7 @@ define @test_vloxseg4_nxv8f16_nxv8i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret @@ -15620,7 +15620,7 @@ define @test_vloxseg4_nxv8f16_nxv8i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -15653,7 +15653,7 @@ define @test_vloxseg2_nxv8f32_nxv8i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -15683,7 +15683,7 @@ define @test_vloxseg2_nxv8f32_nxv8i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -15713,7 +15713,7 @@ define @test_vloxseg2_nxv8f32_nxv8i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vloxseg2ei64.v v16, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret @@ -15743,7 +15743,7 @@ define @test_vloxseg2_nxv8f32_nxv8i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -15773,7 +15773,7 @@ define @test_vloxseg2_nxv2f64_nxv2i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, 
e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -15803,7 +15803,7 @@ define @test_vloxseg2_nxv2f64_nxv2i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -15833,7 +15833,7 @@ define @test_vloxseg2_nxv2f64_nxv2i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -15863,7 +15863,7 @@ define @test_vloxseg2_nxv2f64_nxv2i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxseg2ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -15893,7 +15893,7 @@ define @test_vloxseg3_nxv2f64_nxv2i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -15925,7 +15925,7 @@ define @test_vloxseg3_nxv2f64_nxv2i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -15957,7 +15957,7 @@ define @test_vloxseg3_nxv2f64_nxv2i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -15989,7 +15989,7 @@ define @test_vloxseg3_nxv2f64_nxv2i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -16021,7 +16021,7 @@ define @test_vloxseg4_nxv2f64_nxv2i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -16054,7 +16054,7 @@ define @test_vloxseg4_nxv2f64_nxv2i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -16087,7 +16087,7 @@ define @test_vloxseg4_nxv2f64_nxv2i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -16120,7 +16120,7 @@ define @test_vloxseg4_nxv2f64_nxv2i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -16153,7 +16153,7 @@ define @test_vloxseg2_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -16183,7 +16183,7 @@ define @test_vloxseg2_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -16213,7 +16213,7 @@ define @test_vloxseg2_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -16243,7 +16243,7 @@ define @test_vloxseg2_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -16273,7 +16273,7 @@ define @test_vloxseg3_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -16304,7 +16304,7 @@ define @test_vloxseg3_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -16336,7 +16336,7 @@ define @test_vloxseg3_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg3ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -16367,7 +16367,7 @@ define @test_vloxseg3_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -16399,7 +16399,7 @@ define @test_vloxseg4_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -16432,7 +16432,7 @@ define @test_vloxseg4_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -16465,7 +16465,7 @@ define @test_vloxseg4_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -16497,7 +16497,7 @@ define @test_vloxseg4_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -16530,7 +16530,7 @@ define @test_vloxseg5_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -16564,7 +16564,7 @@ define @test_vloxseg5_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -16598,7 +16598,7 @@ define @test_vloxseg5_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg5ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -16631,7 +16631,7 @@ define @test_vloxseg5_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -16665,7 +16665,7 @@ define @test_vloxseg6_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -16700,7 +16700,7 @@ define @test_vloxseg6_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -16735,7 +16735,7 @@ define @test_vloxseg6_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg6ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -16770,7 +16770,7 @@ define @test_vloxseg6_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -16805,7 +16805,7 @@ define @test_vloxseg7_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -16841,7 +16841,7 @@ define @test_vloxseg7_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -16877,7 +16877,7 @@ define @test_vloxseg7_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg7ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -16913,7 +16913,7 @@ define @test_vloxseg7_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -16949,7 +16949,7 @@ define @test_vloxseg8_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -16986,7 +16986,7 @@ define @test_vloxseg8_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17023,7 +17023,7 @@ define @test_vloxseg8_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -17060,7 +17060,7 @@ define @test_vloxseg8_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17097,7 +17097,7 @@ define @test_vloxseg2_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17127,7 +17127,7 @@ define @test_vloxseg2_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17157,7 +17157,7 @@ define @test_vloxseg2_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17187,7 +17187,7 @@ define @test_vloxseg2_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg2ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -17217,7 +17217,7 @@ define @test_vloxseg3_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17249,7 +17249,7 @@ define @test_vloxseg3_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17281,7 +17281,7 @@ define @test_vloxseg3_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17313,7 +17313,7 @@ define @test_vloxseg3_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -17344,7 +17344,7 @@ define @test_vloxseg4_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17377,7 +17377,7 @@ define @test_vloxseg4_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17410,7 +17410,7 @@ define @test_vloxseg4_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17443,7 +17443,7 @@ define @test_vloxseg4_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -17476,7 +17476,7 @@ define @test_vloxseg5_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17510,7 +17510,7 @@ define @test_vloxseg5_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17544,7 +17544,7 @@ define @test_vloxseg5_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17578,7 +17578,7 @@ define @test_vloxseg5_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg5_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -17612,7 +17612,7 @@ define @test_vloxseg6_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17647,7 +17647,7 @@ define @test_vloxseg6_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17682,7 +17682,7 @@ define @test_vloxseg6_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17717,7 +17717,7 @@ define @test_vloxseg6_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg6_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -17752,7 +17752,7 @@ define @test_vloxseg7_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17788,7 +17788,7 @@ define @test_vloxseg7_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17824,7 +17824,7 @@ define @test_vloxseg7_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17860,7 +17860,7 @@ define @test_vloxseg7_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg7_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -17896,7 +17896,7 @@ define @test_vloxseg8_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17933,7 +17933,7 @@ define @test_vloxseg8_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17970,7 +17970,7 @@ define @test_vloxseg8_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -18007,7 +18007,7 @@ define @test_vloxseg8_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg8_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -18044,7 +18044,7 @@ define @test_vloxseg2_nxv4f32_nxv4i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg2ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -18074,7 +18074,7 @@ define @test_vloxseg2_nxv4f32_nxv4i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg2ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -18104,7 +18104,7 @@ define @test_vloxseg2_nxv4f32_nxv4i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg2ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -18134,7 +18134,7 @@ define @test_vloxseg2_nxv4f32_nxv4i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg2_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg2ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -18164,7 +18164,7 @@ define @test_vloxseg3_nxv4f32_nxv4i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -18196,7 +18196,7 @@ define @test_vloxseg3_nxv4f32_nxv4i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -18228,7 +18228,7 @@ define @test_vloxseg3_nxv4f32_nxv4i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg3ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -18259,7 +18259,7 @@ define @test_vloxseg3_nxv4f32_nxv4i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg3_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -18291,7 +18291,7 @@ define @test_vloxseg4_nxv4f32_nxv4i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -18324,7 +18324,7 @@ define @test_vloxseg4_nxv4f32_nxv4i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -18357,7 +18357,7 @@ define @test_vloxseg4_nxv4f32_nxv4i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -18390,7 +18390,7 @@ define @test_vloxseg4_nxv4f32_nxv4i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vloxseg4_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vlse.ll b/llvm/test/CodeGen/RISCV/rvv/vlse.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlse.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vlse.ll @@ -12,7 +12,7 @@ define @intrinsic_vlse_v_nxv1i64_nxv1i64(* %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -59,7 +59,7 @@ define @intrinsic_vlse_v_nxv2i64_nxv2i64(* %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -106,7 +106,7 @@ define @intrinsic_vlse_v_nxv4i64_nxv4i64(* %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -153,7 +153,7 @@ define @intrinsic_vlse_v_nxv8i64_nxv8i64(* %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vlse64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -200,7 +200,7 @@ define @intrinsic_vlse_v_nxv1f64_nxv1f64(* %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -247,7 +247,7 @@ define @intrinsic_vlse_v_nxv2f64_nxv2f64(* %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -294,7 +294,7 @@ define @intrinsic_vlse_v_nxv4f64_nxv4f64(* %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -341,7 +341,7 @@ define @intrinsic_vlse_v_nxv8f64_nxv8f64(* %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vlse64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -388,7 +388,7 @@ define @intrinsic_vlse_v_nxv1i32_nxv1i32(* %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vlse32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -435,7 +435,7 @@ define @intrinsic_vlse_v_nxv2i32_nxv2i32(* %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vlse32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -482,7 +482,7 @@ define @intrinsic_vlse_v_nxv4i32_nxv4i32(* %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vlse32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -529,7 +529,7 @@ define @intrinsic_vlse_v_nxv8i32_nxv8i32(* %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma ; CHECK-NEXT: vlse32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -576,7 +576,7 @@ define @intrinsic_vlse_v_nxv16i32_nxv16i32(* %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; CHECK-NEXT: vlse32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vlse_v_nxv1f32_nxv1f32(* %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vlse32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vlse_v_nxv2f32_nxv2f32(* %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vlse32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vlse_v_nxv4f32_nxv4f32(* %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vlse32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vlse_v_nxv8f32_nxv8f32(* %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma ; CHECK-NEXT: vlse32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vlse_v_nxv16f32_nxv16f32(* %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; CHECK-NEXT: vlse32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -858,7 +858,7 @@ define @intrinsic_vlse_v_nxv1i16_nxv1i16(* %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vlse16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -905,7 +905,7 @@ define @intrinsic_vlse_v_nxv2i16_nxv2i16(* %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlse16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -952,7 +952,7 @@ define @intrinsic_vlse_v_nxv4i16_nxv4i16(* %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vlse16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -999,7 +999,7 @@ define @intrinsic_vlse_v_nxv8i16_nxv8i16(* %0, 
iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vlse16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1046,7 +1046,7 @@ define @intrinsic_vlse_v_nxv16i16_nxv16i16(* %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vlse16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1093,7 +1093,7 @@ define @intrinsic_vlse_v_nxv32i16_nxv32i16(* %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, ma ; CHECK-NEXT: vlse16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1140,7 +1140,7 @@ define @intrinsic_vlse_v_nxv1f16_nxv1f16(* %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vlse16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1187,7 +1187,7 @@ define @intrinsic_vlse_v_nxv2f16_nxv2f16(* %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlse16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1234,7 +1234,7 @@ define @intrinsic_vlse_v_nxv4f16_nxv4f16(* %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vlse16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1281,7 +1281,7 @@ define @intrinsic_vlse_v_nxv8f16_nxv8f16(* %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vlse16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1328,7 +1328,7 @@ define @intrinsic_vlse_v_nxv16f16_nxv16f16(* %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vlse16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1375,7 +1375,7 @@ define @intrinsic_vlse_v_nxv32f16_nxv32f16(* %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, ma ; CHECK-NEXT: vlse16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1422,7 +1422,7 @@ define @intrinsic_vlse_v_nxv1i8_nxv1i8(* %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vlse8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1469,7 +1469,7 @@ define @intrinsic_vlse_v_nxv2i8_nxv2i8(* %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, 
mf4, ta, ma ; CHECK-NEXT: vlse8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1516,7 +1516,7 @@ define @intrinsic_vlse_v_nxv4i8_nxv4i8(* %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vlse8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1563,7 +1563,7 @@ define @intrinsic_vlse_v_nxv8i8_nxv8i8(* %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vlse8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1610,7 +1610,7 @@ define @intrinsic_vlse_v_nxv16i8_nxv16i8(* %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vlse8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1657,7 +1657,7 @@ define @intrinsic_vlse_v_nxv32i8_nxv32i8(* %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma ; CHECK-NEXT: vlse8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1704,7 +1704,7 @@ define @intrinsic_vlse_v_nxv64i8_nxv64i8(* %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vlse_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma ; CHECK-NEXT: vlse8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vlseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv32.ll @@ -8,7 +8,7 @@ define @test_vlseg2_nxv16i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vlseg2e16.v v4, (a0) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret @@ -41,7 +41,7 @@ define @test_vlseg2_nxv1i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vlseg2e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -74,7 +74,7 @@ define @test_vlseg3_nxv1i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vlseg3e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -108,7 +108,7 @@ define @test_vlseg4_nxv1i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vlseg4e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -143,7 +143,7 @@ define @test_vlseg5_nxv1i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg5_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vlseg5e8.v v7, (a0) ; 
CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -179,7 +179,7 @@ define @test_vlseg6_nxv1i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg6_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vlseg6e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -216,7 +216,7 @@ define @test_vlseg7_nxv1i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg7_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vlseg7e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -254,7 +254,7 @@ define @test_vlseg8_nxv1i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vlseg8e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -293,7 +293,7 @@ define @test_vlseg2_nxv16i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vlseg2e8.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret @@ -326,7 +326,7 @@ define @test_vlseg3_nxv16i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vlseg3e8.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret @@ -360,7 +360,7 @@ define @test_vlseg4_nxv16i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vlseg4e8.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret @@ -395,7 +395,7 @@ define @test_vlseg2_nxv2i32(i32* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg2e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -428,7 +428,7 @@ define @test_vlseg3_nxv2i32(i32* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg3e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -462,7 +462,7 @@ define @test_vlseg4_nxv2i32(i32* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg4e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -497,7 +497,7 @@ define @test_vlseg5_nxv2i32(i32* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg5_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg5e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed 
$v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -533,7 +533,7 @@ define @test_vlseg6_nxv2i32(i32* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg6_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg6e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -570,7 +570,7 @@ define @test_vlseg7_nxv2i32(i32* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg7_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg7e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -608,7 +608,7 @@ define @test_vlseg8_nxv2i32(i32* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg8e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -647,7 +647,7 @@ define @test_vlseg2_nxv4i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -680,7 +680,7 @@ define @test_vlseg3_nxv4i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -714,7 +714,7 @@ define @test_vlseg4_nxv4i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg4e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -749,7 +749,7 @@ define @test_vlseg5_nxv4i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg5_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg5e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -785,7 +785,7 @@ define @test_vlseg6_nxv4i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg6_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg6e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -822,7 +822,7 @@ define @test_vlseg7_nxv4i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg7_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg7e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -860,7 +860,7 @@ define @test_vlseg8_nxv4i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg8e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; 
CHECK-NEXT: ret @@ -899,7 +899,7 @@ define @test_vlseg2_nxv1i32(i32* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg2e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -932,7 +932,7 @@ define @test_vlseg3_nxv1i32(i32* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg3e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -966,7 +966,7 @@ define @test_vlseg4_nxv1i32(i32* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg4e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -1001,7 +1001,7 @@ define @test_vlseg5_nxv1i32(i32* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg5_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg5e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -1037,7 +1037,7 @@ define @test_vlseg6_nxv1i32(i32* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg6_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg6e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -1074,7 +1074,7 @@ define @test_vlseg7_nxv1i32(i32* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg7_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg7e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -1112,7 +1112,7 @@ define @test_vlseg8_nxv1i32(i32* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg8e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -1151,7 +1151,7 @@ define @test_vlseg2_nxv8i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vlseg2e16.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret @@ -1184,7 +1184,7 @@ define @test_vlseg3_nxv8i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vlseg3e16.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret @@ -1218,7 +1218,7 @@ define @test_vlseg4_nxv8i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vlseg4e16.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; 
CHECK-NEXT: ret @@ -1253,7 +1253,7 @@ define @test_vlseg2_nxv8i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vlseg2e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -1286,7 +1286,7 @@ define @test_vlseg3_nxv8i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vlseg3e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -1320,7 +1320,7 @@ define @test_vlseg4_nxv8i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vlseg4e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -1355,7 +1355,7 @@ define @test_vlseg5_nxv8i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg5_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vlseg5e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -1391,7 +1391,7 @@ define @test_vlseg6_nxv8i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg6_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vlseg6e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -1428,7 +1428,7 @@ define @test_vlseg7_nxv8i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg7_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vlseg7e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -1466,7 +1466,7 @@ define @test_vlseg8_nxv8i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vlseg8e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -1505,7 +1505,7 @@ define @test_vlseg2_nxv8i32(i32* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vlseg2e32.v v4, (a0) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret @@ -1538,7 +1538,7 @@ define @test_vlseg2_nxv4i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vlseg2e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -1571,7 +1571,7 @@ define @test_vlseg3_nxv4i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vlseg3e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -1605,7 +1605,7 @@ define @test_vlseg4_nxv4i8(i8* %base, i32 %vl) { ; 
CHECK-LABEL: test_vlseg4_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vlseg4e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -1640,7 +1640,7 @@ define @test_vlseg5_nxv4i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg5_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vlseg5e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -1676,7 +1676,7 @@ define @test_vlseg6_nxv4i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg6_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vlseg6e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -1713,7 +1713,7 @@ define @test_vlseg7_nxv4i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg7_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vlseg7e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -1751,7 +1751,7 @@ define @test_vlseg8_nxv4i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vlseg8e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -1790,7 +1790,7 @@ define @test_vlseg2_nxv1i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -1823,7 +1823,7 @@ define @test_vlseg3_nxv1i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -1857,7 +1857,7 @@ define @test_vlseg4_nxv1i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg4e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -1892,7 +1892,7 @@ define @test_vlseg5_nxv1i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg5_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg5e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -1928,7 +1928,7 @@ define @test_vlseg6_nxv1i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg6_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg6e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -1965,7 +1965,7 @@ define @test_vlseg7_nxv1i16(i16* %base, i32 %vl) { ; CHECK-LABEL: 
test_vlseg7_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg7e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -2003,7 +2003,7 @@ define @test_vlseg8_nxv1i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg8e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -2042,7 +2042,7 @@ define @test_vlseg2_nxv32i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vlseg2e8.v v4, (a0) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret @@ -2075,7 +2075,7 @@ define @test_vlseg2_nxv2i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vlseg2e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -2108,7 +2108,7 @@ define @test_vlseg3_nxv2i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vlseg3e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -2142,7 +2142,7 @@ define @test_vlseg4_nxv2i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vlseg4e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -2177,7 +2177,7 @@ define @test_vlseg5_nxv2i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg5_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vlseg5e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -2213,7 +2213,7 @@ define @test_vlseg6_nxv2i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg6_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vlseg6e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -2250,7 +2250,7 @@ define @test_vlseg7_nxv2i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg7_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vlseg7e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -2288,7 +2288,7 @@ define @test_vlseg8_nxv2i8(i8* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vlseg8e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -2327,7 +2327,7 @@ define @test_vlseg2_nxv2i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv2i16: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -2360,7 +2360,7 @@ define @test_vlseg3_nxv2i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -2394,7 +2394,7 @@ define @test_vlseg4_nxv2i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg4e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -2429,7 +2429,7 @@ define @test_vlseg5_nxv2i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg5_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg5e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -2465,7 +2465,7 @@ define @test_vlseg6_nxv2i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg6_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg6e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -2502,7 +2502,7 @@ define @test_vlseg7_nxv2i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg7_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg7e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -2540,7 +2540,7 @@ define @test_vlseg8_nxv2i16(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg8e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -2579,7 +2579,7 @@ define @test_vlseg2_nxv4i32(i32* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vlseg2e32.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret @@ -2612,7 +2612,7 @@ define @test_vlseg3_nxv4i32(i32* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vlseg3e32.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret @@ -2646,7 +2646,7 @@ define @test_vlseg4_nxv4i32(i32* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vlseg4e32.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret @@ -2681,7 +2681,7 @@ define @test_vlseg2_nxv16f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv16f16: ; CHECK: 
# %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vlseg2e16.v v4, (a0) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret @@ -2714,7 +2714,7 @@ define @test_vlseg2_nxv4f64(double* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vlseg2e64.v v4, (a0) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret @@ -2747,7 +2747,7 @@ define @test_vlseg2_nxv1f64(double* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg2e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -2780,7 +2780,7 @@ define @test_vlseg3_nxv1f64(double* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg3e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -2814,7 +2814,7 @@ define @test_vlseg4_nxv1f64(double* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg4e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -2849,7 +2849,7 @@ define @test_vlseg5_nxv1f64(double* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg5_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg5e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -2885,7 +2885,7 @@ define @test_vlseg6_nxv1f64(double* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg6_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg6e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -2922,7 +2922,7 @@ define @test_vlseg7_nxv1f64(double* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg7_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg7e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -2960,7 +2960,7 @@ define @test_vlseg8_nxv1f64(double* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg8_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg8e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -2999,7 +2999,7 @@ define @test_vlseg2_nxv2f32(float* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg2e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -3032,7 +3032,7 @@ define @test_vlseg3_nxv2f32(float* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv2f32: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg3e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -3066,7 +3066,7 @@ define @test_vlseg4_nxv2f32(float* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg4e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -3101,7 +3101,7 @@ define @test_vlseg5_nxv2f32(float* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg5_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg5e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -3137,7 +3137,7 @@ define @test_vlseg6_nxv2f32(float* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg6_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg6e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -3174,7 +3174,7 @@ define @test_vlseg7_nxv2f32(float* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg7_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg7e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -3212,7 +3212,7 @@ define @test_vlseg8_nxv2f32(float* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg8_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg8e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -3251,7 +3251,7 @@ define @test_vlseg2_nxv1f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -3284,7 +3284,7 @@ define @test_vlseg3_nxv1f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -3318,7 +3318,7 @@ define @test_vlseg4_nxv1f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg4e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -3353,7 +3353,7 @@ define @test_vlseg5_nxv1f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg5_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg5e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -3389,7 +3389,7 @@ define @test_vlseg6_nxv1f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg6_nxv1f16: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg6e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -3426,7 +3426,7 @@ define @test_vlseg7_nxv1f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg7_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg7e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -3464,7 +3464,7 @@ define @test_vlseg8_nxv1f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg8_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg8e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -3503,7 +3503,7 @@ define @test_vlseg2_nxv1f32(float* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg2e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -3536,7 +3536,7 @@ define @test_vlseg3_nxv1f32(float* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg3e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -3570,7 +3570,7 @@ define @test_vlseg4_nxv1f32(float* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg4e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -3605,7 +3605,7 @@ define @test_vlseg5_nxv1f32(float* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg5_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg5e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -3641,7 +3641,7 @@ define @test_vlseg6_nxv1f32(float* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg6_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg6e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -3678,7 +3678,7 @@ define @test_vlseg7_nxv1f32(float* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg7_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg7e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -3716,7 +3716,7 @@ define @test_vlseg8_nxv1f32(float* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg8_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg8e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -3755,7 +3755,7 @@ define @test_vlseg2_nxv8f16(half* %base, i32 %vl) { ; CHECK-LABEL: 
test_vlseg2_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vlseg2e16.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret @@ -3788,7 +3788,7 @@ define @test_vlseg3_nxv8f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vlseg3e16.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret @@ -3822,7 +3822,7 @@ define @test_vlseg4_nxv8f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vlseg4e16.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret @@ -3857,7 +3857,7 @@ define @test_vlseg2_nxv8f32(float* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vlseg2e32.v v4, (a0) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret @@ -3890,7 +3890,7 @@ define @test_vlseg2_nxv2f64(double* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vlseg2e64.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret @@ -3923,7 +3923,7 @@ define @test_vlseg3_nxv2f64(double* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vlseg3e64.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret @@ -3957,7 +3957,7 @@ define @test_vlseg4_nxv2f64(double* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vlseg4e64.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret @@ -3992,7 +3992,7 @@ define @test_vlseg2_nxv4f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -4025,7 +4025,7 @@ define @test_vlseg3_nxv4f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -4059,7 +4059,7 @@ define @test_vlseg4_nxv4f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg4e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -4094,7 +4094,7 @@ define @test_vlseg5_nxv4f16(half* %base, i32 %vl) { ; CHECK-LABEL: 
test_vlseg5_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg5e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -4130,7 +4130,7 @@ define @test_vlseg6_nxv4f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg6_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg6e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -4167,7 +4167,7 @@ define @test_vlseg7_nxv4f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg7_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg7e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -4205,7 +4205,7 @@ define @test_vlseg8_nxv4f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg8_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg8e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -4244,7 +4244,7 @@ define @test_vlseg2_nxv2f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -4277,7 +4277,7 @@ define @test_vlseg3_nxv2f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -4311,7 +4311,7 @@ define @test_vlseg4_nxv2f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg4e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -4346,7 +4346,7 @@ define @test_vlseg5_nxv2f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg5_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg5e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -4382,7 +4382,7 @@ define @test_vlseg6_nxv2f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg6_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg6e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -4419,7 +4419,7 @@ define @test_vlseg7_nxv2f16(half* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg7_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg7e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -4457,7 +4457,7 @@ define @test_vlseg8_nxv2f16(half* %base, i32 %vl) { ; 
CHECK-LABEL: test_vlseg8_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg8e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -4496,7 +4496,7 @@ define @test_vlseg2_nxv4f32(float* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vlseg2e32.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret @@ -4529,7 +4529,7 @@ define @test_vlseg3_nxv4f32(float* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg3_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vlseg3e32.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret @@ -4563,7 +4563,7 @@ define @test_vlseg4_nxv4f32(float* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg4_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vlseg4e32.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll @@ -8,7 +8,7 @@ define @test_vlseg2_nxv16i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vlseg2e16.v v4, (a0) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret @@ -41,7 +41,7 @@ define @test_vlseg2_nxv4i32(i32* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vlseg2e32.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret @@ -74,7 +74,7 @@ define @test_vlseg3_nxv4i32(i32* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vlseg3e32.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret @@ -108,7 +108,7 @@ define @test_vlseg4_nxv4i32(i32* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vlseg4e32.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret @@ -143,7 +143,7 @@ define @test_vlseg2_nxv16i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vlseg2e8.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret @@ -176,7 +176,7 @@ define @test_vlseg3_nxv16i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: 
vlseg3e8.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret @@ -210,7 +210,7 @@ define @test_vlseg4_nxv16i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vlseg4e8.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret @@ -245,7 +245,7 @@ define @test_vlseg2_nxv1i64(i64* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg2e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -278,7 +278,7 @@ define @test_vlseg3_nxv1i64(i64* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg3e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -312,7 +312,7 @@ define @test_vlseg4_nxv1i64(i64* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg4e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -347,7 +347,7 @@ define @test_vlseg5_nxv1i64(i64* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg5_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg5e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -383,7 +383,7 @@ define @test_vlseg6_nxv1i64(i64* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg6_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg6e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -420,7 +420,7 @@ define @test_vlseg7_nxv1i64(i64* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg7_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg7e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -458,7 +458,7 @@ define @test_vlseg8_nxv1i64(i64* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg8e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -497,7 +497,7 @@ define @test_vlseg2_nxv1i32(i32* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg2e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -530,7 +530,7 @@ define @test_vlseg3_nxv1i32(i32* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg3e32.v v7, (a0) ; CHECK-NEXT: # kill: 
def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -564,7 +564,7 @@ define @test_vlseg4_nxv1i32(i32* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg4e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -599,7 +599,7 @@ define @test_vlseg5_nxv1i32(i32* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg5_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg5e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -635,7 +635,7 @@ define @test_vlseg6_nxv1i32(i32* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg6_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg6e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -672,7 +672,7 @@ define @test_vlseg7_nxv1i32(i32* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg7_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg7e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -710,7 +710,7 @@ define @test_vlseg8_nxv1i32(i32* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg8e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -749,7 +749,7 @@ define @test_vlseg2_nxv8i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vlseg2e16.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret @@ -782,7 +782,7 @@ define @test_vlseg3_nxv8i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vlseg3e16.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret @@ -816,7 +816,7 @@ define @test_vlseg4_nxv8i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vlseg4e16.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret @@ -851,7 +851,7 @@ define @test_vlseg2_nxv4i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vlseg2e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -884,7 +884,7 @@ define @test_vlseg3_nxv4i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vlseg3e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed 
$v7_v8_v9 ; CHECK-NEXT: ret @@ -918,7 +918,7 @@ define @test_vlseg4_nxv4i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vlseg4e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -953,7 +953,7 @@ define @test_vlseg5_nxv4i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg5_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vlseg5e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -989,7 +989,7 @@ define @test_vlseg6_nxv4i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg6_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vlseg6e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -1026,7 +1026,7 @@ define @test_vlseg7_nxv4i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg7_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vlseg7e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -1064,7 +1064,7 @@ define @test_vlseg8_nxv4i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vlseg8e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -1103,7 +1103,7 @@ define @test_vlseg2_nxv1i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -1136,7 +1136,7 @@ define @test_vlseg3_nxv1i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -1170,7 +1170,7 @@ define @test_vlseg4_nxv1i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg4e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -1205,7 +1205,7 @@ define @test_vlseg5_nxv1i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg5_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg5e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -1241,7 +1241,7 @@ define @test_vlseg6_nxv1i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg6_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg6e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -1278,7 
+1278,7 @@ define @test_vlseg7_nxv1i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg7_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg7e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -1316,7 +1316,7 @@ define @test_vlseg8_nxv1i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg8e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -1355,7 +1355,7 @@ define @test_vlseg2_nxv2i32(i32* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg2e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -1388,7 +1388,7 @@ define @test_vlseg3_nxv2i32(i32* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg3e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -1422,7 +1422,7 @@ define @test_vlseg4_nxv2i32(i32* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg4e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -1457,7 +1457,7 @@ define @test_vlseg5_nxv2i32(i32* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg5_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg5e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -1493,7 +1493,7 @@ define @test_vlseg6_nxv2i32(i32* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg6_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg6e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -1530,7 +1530,7 @@ define @test_vlseg7_nxv2i32(i32* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg7_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg7e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -1568,7 +1568,7 @@ define @test_vlseg8_nxv2i32(i32* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg8e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -1607,7 +1607,7 @@ define @test_vlseg2_nxv8i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vlseg2e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -1640,7 +1640,7 @@ 
define @test_vlseg3_nxv8i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vlseg3e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -1674,7 +1674,7 @@ define @test_vlseg4_nxv8i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vlseg4e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -1709,7 +1709,7 @@ define @test_vlseg5_nxv8i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg5_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vlseg5e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -1745,7 +1745,7 @@ define @test_vlseg6_nxv8i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg6_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vlseg6e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -1782,7 +1782,7 @@ define @test_vlseg7_nxv8i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg7_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vlseg7e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -1820,7 +1820,7 @@ define @test_vlseg8_nxv8i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vlseg8e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -1859,7 +1859,7 @@ define @test_vlseg2_nxv4i64(i64* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vlseg2e64.v v4, (a0) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret @@ -1892,7 +1892,7 @@ define @test_vlseg2_nxv4i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -1925,7 +1925,7 @@ define @test_vlseg3_nxv4i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -1959,7 +1959,7 @@ define @test_vlseg4_nxv4i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg4e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -1994,7 +1994,7 @@ define @test_vlseg5_nxv4i16(i16* %base, i64 %vl) { ; CHECK-LABEL: 
test_vlseg5_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg5e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -2030,7 +2030,7 @@ define @test_vlseg6_nxv4i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg6_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg6e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -2067,7 +2067,7 @@ define @test_vlseg7_nxv4i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg7_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg7e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -2105,7 +2105,7 @@ define @test_vlseg8_nxv4i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg8e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -2144,7 +2144,7 @@ define @test_vlseg2_nxv1i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vlseg2e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -2177,7 +2177,7 @@ define @test_vlseg3_nxv1i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vlseg3e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -2211,7 +2211,7 @@ define @test_vlseg4_nxv1i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vlseg4e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -2246,7 +2246,7 @@ define @test_vlseg5_nxv1i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg5_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vlseg5e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -2282,7 +2282,7 @@ define @test_vlseg6_nxv1i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg6_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vlseg6e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -2319,7 +2319,7 @@ define @test_vlseg7_nxv1i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg7_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vlseg7e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -2357,7 +2357,7 @@ define @test_vlseg8_nxv1i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg8_nxv1i8: ; CHECK: # %bb.0: 
# %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vlseg8e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -2396,7 +2396,7 @@ define @test_vlseg2_nxv2i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vlseg2e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -2429,7 +2429,7 @@ define @test_vlseg3_nxv2i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vlseg3e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -2463,7 +2463,7 @@ define @test_vlseg4_nxv2i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vlseg4e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -2498,7 +2498,7 @@ define @test_vlseg5_nxv2i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg5_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vlseg5e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -2534,7 +2534,7 @@ define @test_vlseg6_nxv2i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg6_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vlseg6e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -2571,7 +2571,7 @@ define @test_vlseg7_nxv2i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg7_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vlseg7e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -2609,7 +2609,7 @@ define @test_vlseg8_nxv2i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vlseg8e8.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -2648,7 +2648,7 @@ define @test_vlseg2_nxv8i32(i32* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vlseg2e32.v v4, (a0) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret @@ -2681,7 +2681,7 @@ define @test_vlseg2_nxv32i8(i8* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vlseg2e8.v v4, (a0) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret @@ -2714,7 +2714,7 @@ define @test_vlseg2_nxv2i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, 
mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -2747,7 +2747,7 @@ define @test_vlseg3_nxv2i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -2781,7 +2781,7 @@ define @test_vlseg4_nxv2i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg4e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -2816,7 +2816,7 @@ define @test_vlseg5_nxv2i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg5_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg5e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -2852,7 +2852,7 @@ define @test_vlseg6_nxv2i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg6_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg6e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -2889,7 +2889,7 @@ define @test_vlseg7_nxv2i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg7_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg7e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -2927,7 +2927,7 @@ define @test_vlseg8_nxv2i16(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg8e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -2966,7 +2966,7 @@ define @test_vlseg2_nxv2i64(i64* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vlseg2e64.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret @@ -2999,7 +2999,7 @@ define @test_vlseg3_nxv2i64(i64* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vlseg3e64.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret @@ -3033,7 +3033,7 @@ define @test_vlseg4_nxv2i64(i64* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vlseg4e64.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret @@ -3068,7 +3068,7 @@ define @test_vlseg2_nxv16f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, 
e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vlseg2e16.v v4, (a0) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret @@ -3101,7 +3101,7 @@ define @test_vlseg2_nxv4f64(double* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vlseg2e64.v v4, (a0) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret @@ -3134,7 +3134,7 @@ define @test_vlseg2_nxv1f64(double* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg2e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -3167,7 +3167,7 @@ define @test_vlseg3_nxv1f64(double* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg3e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -3201,7 +3201,7 @@ define @test_vlseg4_nxv1f64(double* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg4e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -3236,7 +3236,7 @@ define @test_vlseg5_nxv1f64(double* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg5_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg5e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -3272,7 +3272,7 @@ define @test_vlseg6_nxv1f64(double* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg6_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg6e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -3309,7 +3309,7 @@ define @test_vlseg7_nxv1f64(double* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg7_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg7e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -3347,7 +3347,7 @@ define @test_vlseg8_nxv1f64(double* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg8_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg8e64.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -3386,7 +3386,7 @@ define @test_vlseg2_nxv2f32(float* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg2e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -3419,7 +3419,7 @@ define @test_vlseg3_nxv2f32(float* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, 
mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg3e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -3453,7 +3453,7 @@ define @test_vlseg4_nxv2f32(float* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg4e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -3488,7 +3488,7 @@ define @test_vlseg5_nxv2f32(float* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg5_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg5e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -3524,7 +3524,7 @@ define @test_vlseg6_nxv2f32(float* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg6_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg6e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -3561,7 +3561,7 @@ define @test_vlseg7_nxv2f32(float* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg7_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg7e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -3599,7 +3599,7 @@ define @test_vlseg8_nxv2f32(float* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg8_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg8e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -3638,7 +3638,7 @@ define @test_vlseg2_nxv1f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -3671,7 +3671,7 @@ define @test_vlseg3_nxv1f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -3705,7 +3705,7 @@ define @test_vlseg4_nxv1f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg4e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -3740,7 +3740,7 @@ define @test_vlseg5_nxv1f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg5_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg5e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -3776,7 +3776,7 @@ define @test_vlseg6_nxv1f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg6_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; 
CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg6e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -3813,7 +3813,7 @@ define @test_vlseg7_nxv1f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg7_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg7e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -3851,7 +3851,7 @@ define @test_vlseg8_nxv1f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg8_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg8e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -3890,7 +3890,7 @@ define @test_vlseg2_nxv1f32(float* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg2e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -3923,7 +3923,7 @@ define @test_vlseg3_nxv1f32(float* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg3e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -3957,7 +3957,7 @@ define @test_vlseg4_nxv1f32(float* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg4e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -3992,7 +3992,7 @@ define @test_vlseg5_nxv1f32(float* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg5_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg5e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -4028,7 +4028,7 @@ define @test_vlseg6_nxv1f32(float* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg6_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg6e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -4065,7 +4065,7 @@ define @test_vlseg7_nxv1f32(float* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg7_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg7e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -4103,7 +4103,7 @@ define @test_vlseg8_nxv1f32(float* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg8_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg8e32.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -4142,7 +4142,7 @@ define @test_vlseg2_nxv8f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vlseg2e16.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret @@ -4175,7 +4175,7 @@ define @test_vlseg3_nxv8f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vlseg3e16.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret @@ -4209,7 +4209,7 @@ define @test_vlseg4_nxv8f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vlseg4e16.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret @@ -4244,7 +4244,7 @@ define @test_vlseg2_nxv8f32(float* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vlseg2e32.v v4, (a0) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret @@ -4277,7 +4277,7 @@ define @test_vlseg2_nxv2f64(double* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vlseg2e64.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret @@ -4310,7 +4310,7 @@ define @test_vlseg3_nxv2f64(double* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vlseg3e64.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret @@ -4344,7 +4344,7 @@ define @test_vlseg4_nxv2f64(double* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vlseg4e64.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret @@ -4379,7 +4379,7 @@ define @test_vlseg2_nxv4f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -4412,7 +4412,7 @@ define @test_vlseg3_nxv4f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -4446,7 +4446,7 @@ define @test_vlseg4_nxv4f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg4e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -4481,7 +4481,7 @@ define @test_vlseg5_nxv4f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg5_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg5e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -4517,7 +4517,7 @@ define @test_vlseg6_nxv4f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg6_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg6e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -4554,7 +4554,7 @@ define @test_vlseg7_nxv4f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg7_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg7e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -4592,7 +4592,7 @@ define @test_vlseg8_nxv4f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg8_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg8e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -4631,7 +4631,7 @@ define @test_vlseg2_nxv2f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg2e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -4664,7 +4664,7 @@ define @test_vlseg3_nxv2f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg3e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -4698,7 +4698,7 @@ define @test_vlseg4_nxv2f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg4e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -4733,7 +4733,7 @@ define @test_vlseg5_nxv2f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg5_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg5e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -4769,7 +4769,7 @@ define @test_vlseg6_nxv2f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg6_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg6e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -4806,7 +4806,7 @@ define @test_vlseg7_nxv2f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg7_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg7e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -4844,7 +4844,7 @@ define @test_vlseg8_nxv2f16(half* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg8_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg8e16.v v7, (a0) ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -4883,7 +4883,7 @@ define @test_vlseg2_nxv4f32(float* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vlseg2e32.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret @@ -4916,7 +4916,7 @@ define @test_vlseg3_nxv4f32(float* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg3_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vlseg3e32.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret @@ -4950,7 +4950,7 @@ define @test_vlseg4_nxv4f32(float* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg4_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vlseg4e32.v v6, (a0) ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll @@ -8,7 +8,7 @@ define void @test_vlseg2ff_dead_value(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_dead_value: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vlseg2e16ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -40,7 +40,7 @@ define @test_vlseg2ff_dead_vl(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2ff_dead_vl: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vlseg2e16ff.v v4, (a0) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret @@ -67,7 +67,7 @@ define void @test_vlseg2ff_dead_all(i16* %base, i32 %vl) { ; CHECK-LABEL: test_vlseg2ff_dead_all: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vlseg2e16ff.v v8, (a0) ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll @@ -8,7 +8,7 @@ define @test_vlseg2ff_nxv16i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vlseg2e16ff.v v4, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -46,7 +46,7 @@ define @test_vlseg2ff_nxv1i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vlseg2e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -84,7 +84,7 @@ define @test_vlseg3ff_nxv1i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vlseg3e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -123,7 +123,7 @@ define @test_vlseg4ff_nxv1i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vlseg4e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -163,7 +163,7 @@ define @test_vlseg5ff_nxv1i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vlseg5e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -204,7 +204,7 @@ define @test_vlseg6ff_nxv1i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vlseg6e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -246,7 +246,7 @@ define @test_vlseg7ff_nxv1i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vlseg7e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -289,7 +289,7 @@ define @test_vlseg8ff_nxv1i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vlseg8e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -333,7 +333,7 @@ define @test_vlseg2ff_nxv16i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vlseg2e8ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -371,7 +371,7 @@ define @test_vlseg3ff_nxv16i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vlseg3e8ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -410,7 +410,7 @@ define @test_vlseg4ff_nxv16i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vlseg4e8ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -450,7 +450,7 @@ define @test_vlseg2ff_nxv2i32(i32* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg2e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -488,7 +488,7 @@ define @test_vlseg3ff_nxv2i32(i32* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg3e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -527,7 +527,7 @@ 
define @test_vlseg4ff_nxv2i32(i32* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg4e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -567,7 +567,7 @@ define @test_vlseg5ff_nxv2i32(i32* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg5e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -608,7 +608,7 @@ define @test_vlseg6ff_nxv2i32(i32* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg6e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -650,7 +650,7 @@ define @test_vlseg7ff_nxv2i32(i32* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg7e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -693,7 +693,7 @@ define @test_vlseg8ff_nxv2i32(i32* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg8e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -737,7 +737,7 @@ define @test_vlseg2ff_nxv4i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg2e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -775,7 +775,7 @@ define @test_vlseg3ff_nxv4i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg3e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -814,7 +814,7 @@ define @test_vlseg4ff_nxv4i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg4e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -854,7 +854,7 @@ define @test_vlseg5ff_nxv4i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg5e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -895,7 +895,7 @@ define @test_vlseg6ff_nxv4i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg6e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -937,7 +937,7 @@ define @test_vlseg7ff_nxv4i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, 
e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg7e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -980,7 +980,7 @@ define @test_vlseg8ff_nxv4i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg8e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1024,7 +1024,7 @@ define @test_vlseg2ff_nxv1i32(i32* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg2e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1062,7 +1062,7 @@ define @test_vlseg3ff_nxv1i32(i32* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg3e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1101,7 +1101,7 @@ define @test_vlseg4ff_nxv1i32(i32* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg4e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1141,7 +1141,7 @@ define @test_vlseg5ff_nxv1i32(i32* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg5e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1182,7 +1182,7 @@ define @test_vlseg6ff_nxv1i32(i32* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg6e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1224,7 +1224,7 @@ define @test_vlseg7ff_nxv1i32(i32* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg7e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1267,7 +1267,7 @@ define @test_vlseg8ff_nxv1i32(i32* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg8e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1311,7 +1311,7 @@ define @test_vlseg2ff_nxv8i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vlseg2e16ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1349,7 +1349,7 @@ define @test_vlseg3ff_nxv8i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vlseg3e16ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; 
CHECK-NEXT: sw a0, 0(a2) @@ -1388,7 +1388,7 @@ define @test_vlseg4ff_nxv8i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vlseg4e16ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1428,7 +1428,7 @@ define @test_vlseg2ff_nxv8i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vlseg2e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1466,7 +1466,7 @@ define @test_vlseg3ff_nxv8i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vlseg3e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1505,7 +1505,7 @@ define @test_vlseg4ff_nxv8i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vlseg4e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1545,7 +1545,7 @@ define @test_vlseg5ff_nxv8i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vlseg5e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1586,7 +1586,7 @@ define @test_vlseg6ff_nxv8i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vlseg6e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1628,7 +1628,7 @@ define @test_vlseg7ff_nxv8i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vlseg7e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1671,7 +1671,7 @@ define @test_vlseg8ff_nxv8i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vlseg8e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1715,7 +1715,7 @@ define @test_vlseg2ff_nxv8i32(i32* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vlseg2e32ff.v v4, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1753,7 +1753,7 @@ define @test_vlseg2ff_nxv4i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vlseg2e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1791,7 +1791,7 @@ define @test_vlseg3ff_nxv4i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vlseg3e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1830,7 +1830,7 @@ define @test_vlseg4ff_nxv4i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vlseg4e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1870,7 +1870,7 @@ define @test_vlseg5ff_nxv4i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vlseg5e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1911,7 +1911,7 @@ define @test_vlseg6ff_nxv4i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vlseg6e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1953,7 +1953,7 @@ define @test_vlseg7ff_nxv4i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vlseg7e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -1996,7 +1996,7 @@ define @test_vlseg8ff_nxv4i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vlseg8e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2040,7 +2040,7 @@ define @test_vlseg2ff_nxv1i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg2e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2078,7 +2078,7 @@ define @test_vlseg3ff_nxv1i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg3e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2117,7 +2117,7 @@ define @test_vlseg4ff_nxv1i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg4e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2157,7 +2157,7 @@ define @test_vlseg5ff_nxv1i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg5e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2198,7 +2198,7 @@ define @test_vlseg6ff_nxv1i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg6e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; 
CHECK-NEXT: sw a0, 0(a2) @@ -2240,7 +2240,7 @@ define @test_vlseg7ff_nxv1i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg7e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2283,7 +2283,7 @@ define @test_vlseg8ff_nxv1i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg8e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2327,7 +2327,7 @@ define @test_vlseg2ff_nxv32i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vlseg2e8ff.v v4, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2365,7 +2365,7 @@ define @test_vlseg2ff_nxv2i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vlseg2e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2403,7 +2403,7 @@ define @test_vlseg3ff_nxv2i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vlseg3e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2442,7 +2442,7 @@ define @test_vlseg4ff_nxv2i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vlseg4e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2482,7 +2482,7 @@ define @test_vlseg5ff_nxv2i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vlseg5e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2523,7 +2523,7 @@ define @test_vlseg6ff_nxv2i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vlseg6e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2565,7 +2565,7 @@ define @test_vlseg7ff_nxv2i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vlseg7e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2608,7 +2608,7 @@ define @test_vlseg8ff_nxv2i8(i8* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vlseg8e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2652,7 +2652,7 @@ define @test_vlseg2ff_nxv2i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2i16: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg2e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2690,7 +2690,7 @@ define @test_vlseg3ff_nxv2i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg3e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2729,7 +2729,7 @@ define @test_vlseg4ff_nxv2i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg4e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2769,7 +2769,7 @@ define @test_vlseg5ff_nxv2i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg5e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2810,7 +2810,7 @@ define @test_vlseg6ff_nxv2i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg6e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2852,7 +2852,7 @@ define @test_vlseg7ff_nxv2i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg7e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2895,7 +2895,7 @@ define @test_vlseg8ff_nxv2i16(i16* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg8e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2939,7 +2939,7 @@ define @test_vlseg2ff_nxv4i32(i32* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vlseg2e32ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -2977,7 +2977,7 @@ define @test_vlseg3ff_nxv4i32(i32* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vlseg3e32ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3016,7 +3016,7 @@ define @test_vlseg4ff_nxv4i32(i32* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vlseg4e32ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3056,7 +3056,7 @@ define @test_vlseg2ff_nxv16f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: 
vlseg2e16ff.v v4, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3094,7 +3094,7 @@ define @test_vlseg2ff_nxv4f64(double* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vlseg2e64ff.v v4, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3132,7 +3132,7 @@ define @test_vlseg2ff_nxv1f64(double* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg2e64ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3170,7 +3170,7 @@ define @test_vlseg3ff_nxv1f64(double* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg3e64ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3209,7 +3209,7 @@ define @test_vlseg4ff_nxv1f64(double* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg4e64ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3249,7 +3249,7 @@ define @test_vlseg5ff_nxv1f64(double* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg5e64ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3290,7 +3290,7 @@ define @test_vlseg6ff_nxv1f64(double* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg6e64ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3332,7 +3332,7 @@ define @test_vlseg7ff_nxv1f64(double* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg7e64ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3375,7 +3375,7 @@ define @test_vlseg8ff_nxv1f64(double* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg8e64ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3419,7 +3419,7 @@ define @test_vlseg2ff_nxv2f32(float* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg2e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3457,7 +3457,7 @@ define @test_vlseg3ff_nxv2f32(float* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg3e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3496,7 +3496,7 @@ define 
@test_vlseg4ff_nxv2f32(float* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg4e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3536,7 +3536,7 @@ define @test_vlseg5ff_nxv2f32(float* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg5e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3577,7 +3577,7 @@ define @test_vlseg6ff_nxv2f32(float* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg6e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3619,7 +3619,7 @@ define @test_vlseg7ff_nxv2f32(float* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg7e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3662,7 +3662,7 @@ define @test_vlseg8ff_nxv2f32(float* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg8e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3706,7 +3706,7 @@ define @test_vlseg2ff_nxv1f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg2e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3744,7 +3744,7 @@ define @test_vlseg3ff_nxv1f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg3e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3783,7 +3783,7 @@ define @test_vlseg4ff_nxv1f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg4e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3823,7 +3823,7 @@ define @test_vlseg5ff_nxv1f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg5e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3864,7 +3864,7 @@ define @test_vlseg6ff_nxv1f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg6e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3906,7 +3906,7 @@ define @test_vlseg7ff_nxv1f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1f16: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg7e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3949,7 +3949,7 @@ define @test_vlseg8ff_nxv1f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg8e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -3993,7 +3993,7 @@ define @test_vlseg2ff_nxv1f32(float* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg2e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4031,7 +4031,7 @@ define @test_vlseg3ff_nxv1f32(float* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg3e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4070,7 +4070,7 @@ define @test_vlseg4ff_nxv1f32(float* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg4e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4110,7 +4110,7 @@ define @test_vlseg5ff_nxv1f32(float* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg5e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4151,7 +4151,7 @@ define @test_vlseg6ff_nxv1f32(float* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg6e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4193,7 +4193,7 @@ define @test_vlseg7ff_nxv1f32(float* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg7e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4236,7 +4236,7 @@ define @test_vlseg8ff_nxv1f32(float* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg8e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4280,7 +4280,7 @@ define @test_vlseg2ff_nxv8f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vlseg2e16ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4318,7 +4318,7 @@ define @test_vlseg3ff_nxv8f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; 
CHECK-NEXT: vlseg3e16ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4357,7 +4357,7 @@ define @test_vlseg4ff_nxv8f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vlseg4e16ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4397,7 +4397,7 @@ define @test_vlseg2ff_nxv8f32(float* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vlseg2e32ff.v v4, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4435,7 +4435,7 @@ define @test_vlseg2ff_nxv2f64(double* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vlseg2e64ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4473,7 +4473,7 @@ define @test_vlseg3ff_nxv2f64(double* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vlseg3e64ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4512,7 +4512,7 @@ define @test_vlseg4ff_nxv2f64(double* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vlseg4e64ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4552,7 +4552,7 @@ define @test_vlseg2ff_nxv4f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg2e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4590,7 +4590,7 @@ define @test_vlseg3ff_nxv4f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg3e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4629,7 +4629,7 @@ define @test_vlseg4ff_nxv4f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg4e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4669,7 +4669,7 @@ define @test_vlseg5ff_nxv4f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg5e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4710,7 +4710,7 @@ define @test_vlseg6ff_nxv4f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg6e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4752,7 +4752,7 @@ define 
@test_vlseg7ff_nxv4f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg7e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4795,7 +4795,7 @@ define @test_vlseg8ff_nxv4f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg8e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4839,7 +4839,7 @@ define @test_vlseg2ff_nxv2f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg2e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4877,7 +4877,7 @@ define @test_vlseg3ff_nxv2f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg3e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4916,7 +4916,7 @@ define @test_vlseg4ff_nxv2f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg4e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4956,7 +4956,7 @@ define @test_vlseg5ff_nxv2f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg5e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -4997,7 +4997,7 @@ define @test_vlseg6ff_nxv2f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg6e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -5039,7 +5039,7 @@ define @test_vlseg7ff_nxv2f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg7e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -5082,7 +5082,7 @@ define @test_vlseg8ff_nxv2f16(half* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg8e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -5126,7 +5126,7 @@ define @test_vlseg2ff_nxv4f32(float* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vlseg2e32ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -5164,7 +5164,7 @@ define @test_vlseg3ff_nxv4f32(float* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4f32: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vlseg3e32ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) @@ -5203,7 +5203,7 @@ define @test_vlseg4ff_nxv4f32(float* %base, i32 %vl, i32* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vlseg4e32ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sw a0, 0(a2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll @@ -8,7 +8,7 @@ define void @test_vlseg2ff_dead_value(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_dead_value: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vlseg2e16ff.v v8, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -40,7 +40,7 @@ define @test_vlseg2ff_dead_vl(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2ff_dead_vl: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vlseg2e16ff.v v4, (a0) ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret @@ -67,7 +67,7 @@ define void @test_vlseg2ff_dead_all(i16* %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2ff_dead_all: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vlseg2e16ff.v v8, (a0) ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll @@ -8,7 +8,7 @@ define @test_vlseg2ff_nxv16i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vlseg2e16ff.v v4, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -46,7 +46,7 @@ define @test_vlseg2ff_nxv4i32(i32* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vlseg2e32ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -84,7 +84,7 @@ define @test_vlseg3ff_nxv4i32(i32* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vlseg3e32ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -123,7 +123,7 @@ define @test_vlseg4ff_nxv4i32(i32* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vlseg4e32ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -163,7 +163,7 @@ define @test_vlseg2ff_nxv16i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, 
ta, ma ; CHECK-NEXT: vlseg2e8ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -201,7 +201,7 @@ define @test_vlseg3ff_nxv16i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vlseg3e8ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -240,7 +240,7 @@ define @test_vlseg4ff_nxv16i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vlseg4e8ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -280,7 +280,7 @@ define @test_vlseg2ff_nxv1i64(i64* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg2e64ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -318,7 +318,7 @@ define @test_vlseg3ff_nxv1i64(i64* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg3e64ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -357,7 +357,7 @@ define @test_vlseg4ff_nxv1i64(i64* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg4e64ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -397,7 +397,7 @@ define @test_vlseg5ff_nxv1i64(i64* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg5e64ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -438,7 +438,7 @@ define @test_vlseg6ff_nxv1i64(i64* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg6e64ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -480,7 +480,7 @@ define @test_vlseg7ff_nxv1i64(i64* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg7e64ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -523,7 +523,7 @@ define @test_vlseg8ff_nxv1i64(i64* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg8e64ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -567,7 +567,7 @@ define @test_vlseg2ff_nxv1i32(i32* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg2e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -605,7 +605,7 @@ define @test_vlseg3ff_nxv1i32(i32* %base, i64 %vl, 
i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg3e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -644,7 +644,7 @@ define @test_vlseg4ff_nxv1i32(i32* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg4e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -684,7 +684,7 @@ define @test_vlseg5ff_nxv1i32(i32* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg5e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -725,7 +725,7 @@ define @test_vlseg6ff_nxv1i32(i32* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg6e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -767,7 +767,7 @@ define @test_vlseg7ff_nxv1i32(i32* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg7e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -810,7 +810,7 @@ define @test_vlseg8ff_nxv1i32(i32* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg8e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -854,7 +854,7 @@ define @test_vlseg2ff_nxv8i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vlseg2e16ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -892,7 +892,7 @@ define @test_vlseg3ff_nxv8i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vlseg3e16ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -931,7 +931,7 @@ define @test_vlseg4ff_nxv8i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vlseg4e16ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -971,7 +971,7 @@ define @test_vlseg2ff_nxv4i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vlseg2e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1009,7 +1009,7 @@ define @test_vlseg3ff_nxv4i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, 
a1, e8, mf2, ta, ma ; CHECK-NEXT: vlseg3e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1048,7 +1048,7 @@ define @test_vlseg4ff_nxv4i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vlseg4e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1088,7 +1088,7 @@ define @test_vlseg5ff_nxv4i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vlseg5e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1129,7 +1129,7 @@ define @test_vlseg6ff_nxv4i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vlseg6e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1171,7 +1171,7 @@ define @test_vlseg7ff_nxv4i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vlseg7e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1214,7 +1214,7 @@ define @test_vlseg8ff_nxv4i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vlseg8e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1258,7 +1258,7 @@ define @test_vlseg2ff_nxv1i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg2e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1296,7 +1296,7 @@ define @test_vlseg3ff_nxv1i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg3e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1335,7 +1335,7 @@ define @test_vlseg4ff_nxv1i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg4e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1375,7 +1375,7 @@ define @test_vlseg5ff_nxv1i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg5e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1416,7 +1416,7 @@ define @test_vlseg6ff_nxv1i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg6e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1458,7 +1458,7 @@ define 
@test_vlseg7ff_nxv1i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg7e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1501,7 +1501,7 @@ define @test_vlseg8ff_nxv1i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg8e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1545,7 +1545,7 @@ define @test_vlseg2ff_nxv2i32(i32* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg2e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1583,7 +1583,7 @@ define @test_vlseg3ff_nxv2i32(i32* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg3e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1622,7 +1622,7 @@ define @test_vlseg4ff_nxv2i32(i32* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg4e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1662,7 +1662,7 @@ define @test_vlseg5ff_nxv2i32(i32* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg5e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1703,7 +1703,7 @@ define @test_vlseg6ff_nxv2i32(i32* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg6e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1745,7 +1745,7 @@ define @test_vlseg7ff_nxv2i32(i32* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg7e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1788,7 +1788,7 @@ define @test_vlseg8ff_nxv2i32(i32* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg8e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1832,7 +1832,7 @@ define @test_vlseg2ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vlseg2e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1870,7 +1870,7 @@ define @test_vlseg3ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vlseg3e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1909,7 +1909,7 @@ define @test_vlseg4ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vlseg4e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1949,7 +1949,7 @@ define @test_vlseg5ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vlseg5e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -1990,7 +1990,7 @@ define @test_vlseg6ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vlseg6e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2032,7 +2032,7 @@ define @test_vlseg7ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vlseg7e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2075,7 +2075,7 @@ define @test_vlseg8ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vlseg8e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2119,7 +2119,7 @@ define @test_vlseg2ff_nxv4i64(i64* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vlseg2e64ff.v v4, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2157,7 +2157,7 @@ define @test_vlseg2ff_nxv4i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg2e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2195,7 +2195,7 @@ define @test_vlseg3ff_nxv4i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg3e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2234,7 +2234,7 @@ define @test_vlseg4ff_nxv4i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg4e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2274,7 +2274,7 @@ define @test_vlseg5ff_nxv4i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg5e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2315,7 
+2315,7 @@ define @test_vlseg6ff_nxv4i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg6e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2357,7 +2357,7 @@ define @test_vlseg7ff_nxv4i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg7e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2400,7 +2400,7 @@ define @test_vlseg8ff_nxv4i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg8e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2444,7 +2444,7 @@ define @test_vlseg2ff_nxv1i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vlseg2e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2482,7 +2482,7 @@ define @test_vlseg3ff_nxv1i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vlseg3e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2521,7 +2521,7 @@ define @test_vlseg4ff_nxv1i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vlseg4e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2561,7 +2561,7 @@ define @test_vlseg5ff_nxv1i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vlseg5e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2602,7 +2602,7 @@ define @test_vlseg6ff_nxv1i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vlseg6e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2644,7 +2644,7 @@ define @test_vlseg7ff_nxv1i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vlseg7e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2687,7 +2687,7 @@ define @test_vlseg8ff_nxv1i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vlseg8e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2731,7 +2731,7 @@ define @test_vlseg2ff_nxv2i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, 
e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vlseg2e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2769,7 +2769,7 @@ define @test_vlseg3ff_nxv2i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vlseg3e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2808,7 +2808,7 @@ define @test_vlseg4ff_nxv2i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vlseg4e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2848,7 +2848,7 @@ define @test_vlseg5ff_nxv2i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vlseg5e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2889,7 +2889,7 @@ define @test_vlseg6ff_nxv2i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vlseg6e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2931,7 +2931,7 @@ define @test_vlseg7ff_nxv2i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vlseg7e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -2974,7 +2974,7 @@ define @test_vlseg8ff_nxv2i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vlseg8e8ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3018,7 +3018,7 @@ define @test_vlseg2ff_nxv8i32(i32* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vlseg2e32ff.v v4, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3056,7 +3056,7 @@ define @test_vlseg2ff_nxv32i8(i8* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vlseg2e8ff.v v4, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3094,7 +3094,7 @@ define @test_vlseg2ff_nxv2i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg2e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3132,7 +3132,7 @@ define @test_vlseg3ff_nxv2i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg3e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3171,7 
+3171,7 @@ define @test_vlseg4ff_nxv2i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg4e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3211,7 +3211,7 @@ define @test_vlseg5ff_nxv2i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg5e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3252,7 +3252,7 @@ define @test_vlseg6ff_nxv2i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg6e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3294,7 +3294,7 @@ define @test_vlseg7ff_nxv2i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg7e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3337,7 +3337,7 @@ define @test_vlseg8ff_nxv2i16(i16* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg8e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3381,7 +3381,7 @@ define @test_vlseg2ff_nxv2i64(i64* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vlseg2e64ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3419,7 +3419,7 @@ define @test_vlseg3ff_nxv2i64(i64* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vlseg3e64ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3458,7 +3458,7 @@ define @test_vlseg4ff_nxv2i64(i64* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vlseg4e64ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3498,7 +3498,7 @@ define @test_vlseg2ff_nxv16f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vlseg2e16ff.v v4, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3536,7 +3536,7 @@ define @test_vlseg2ff_nxv4f64(double* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vlseg2e64ff.v v4, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3574,7 +3574,7 @@ define @test_vlseg2ff_nxv1f64(double* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1f64: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg2e64ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3612,7 +3612,7 @@ define @test_vlseg3ff_nxv1f64(double* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg3e64ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3651,7 +3651,7 @@ define @test_vlseg4ff_nxv1f64(double* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg4e64ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3691,7 +3691,7 @@ define @test_vlseg5ff_nxv1f64(double* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg5e64ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3732,7 +3732,7 @@ define @test_vlseg6ff_nxv1f64(double* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg6e64ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3774,7 +3774,7 @@ define @test_vlseg7ff_nxv1f64(double* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg7e64ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3817,7 +3817,7 @@ define @test_vlseg8ff_nxv1f64(double* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vlseg8e64ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3861,7 +3861,7 @@ define @test_vlseg2ff_nxv2f32(float* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg2e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3899,7 +3899,7 @@ define @test_vlseg3ff_nxv2f32(float* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg3e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3938,7 +3938,7 @@ define @test_vlseg4ff_nxv2f32(float* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg4e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -3978,7 +3978,7 @@ define @test_vlseg5ff_nxv2f32(float* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; 
CHECK-NEXT: vlseg5e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4019,7 +4019,7 @@ define @test_vlseg6ff_nxv2f32(float* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg6e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4061,7 +4061,7 @@ define @test_vlseg7ff_nxv2f32(float* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg7e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4104,7 +4104,7 @@ define @test_vlseg8ff_nxv2f32(float* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vlseg8e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4148,7 +4148,7 @@ define @test_vlseg2ff_nxv1f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg2e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4186,7 +4186,7 @@ define @test_vlseg3ff_nxv1f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg3e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4225,7 +4225,7 @@ define @test_vlseg4ff_nxv1f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg4e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4265,7 +4265,7 @@ define @test_vlseg5ff_nxv1f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg5e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4306,7 +4306,7 @@ define @test_vlseg6ff_nxv1f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg6e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4348,7 +4348,7 @@ define @test_vlseg7ff_nxv1f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg7e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4391,7 +4391,7 @@ define @test_vlseg8ff_nxv1f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vlseg8e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4435,7 +4435,7 @@ define 
@test_vlseg2ff_nxv1f32(float* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg2e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4473,7 +4473,7 @@ define @test_vlseg3ff_nxv1f32(float* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg3e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4512,7 +4512,7 @@ define @test_vlseg4ff_nxv1f32(float* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg4e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4552,7 +4552,7 @@ define @test_vlseg5ff_nxv1f32(float* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg5e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4593,7 +4593,7 @@ define @test_vlseg6ff_nxv1f32(float* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg6e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4635,7 +4635,7 @@ define @test_vlseg7ff_nxv1f32(float* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg7e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4678,7 +4678,7 @@ define @test_vlseg8ff_nxv1f32(float* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vlseg8e32ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4722,7 +4722,7 @@ define @test_vlseg2ff_nxv8f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vlseg2e16ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4760,7 +4760,7 @@ define @test_vlseg3ff_nxv8f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vlseg3e16ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4799,7 +4799,7 @@ define @test_vlseg4ff_nxv8f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vlseg4e16ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4839,7 +4839,7 @@ define @test_vlseg2ff_nxv8f32(float* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv8f32: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vlseg2e32ff.v v4, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4877,7 +4877,7 @@ define @test_vlseg2ff_nxv2f64(double* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vlseg2e64ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4915,7 +4915,7 @@ define @test_vlseg3ff_nxv2f64(double* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vlseg3e64ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4954,7 +4954,7 @@ define @test_vlseg4ff_nxv2f64(double* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vlseg4e64ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -4994,7 +4994,7 @@ define @test_vlseg2ff_nxv4f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg2e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5032,7 +5032,7 @@ define @test_vlseg3ff_nxv4f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg3e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5071,7 +5071,7 @@ define @test_vlseg4ff_nxv4f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg4e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5111,7 +5111,7 @@ define @test_vlseg5ff_nxv4f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg5e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5152,7 +5152,7 @@ define @test_vlseg6ff_nxv4f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg6e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5194,7 +5194,7 @@ define @test_vlseg7ff_nxv4f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vlseg7e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5237,7 +5237,7 @@ define @test_vlseg8ff_nxv4f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: 
vlseg8e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5281,7 +5281,7 @@ define @test_vlseg2ff_nxv2f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg2e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5319,7 +5319,7 @@ define @test_vlseg3ff_nxv2f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg3e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5358,7 +5358,7 @@ define @test_vlseg4ff_nxv2f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg4e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5398,7 +5398,7 @@ define @test_vlseg5ff_nxv2f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg5ff_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg5e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5439,7 +5439,7 @@ define @test_vlseg6ff_nxv2f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg6ff_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg6e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5481,7 +5481,7 @@ define @test_vlseg7ff_nxv2f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg7ff_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg7e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5524,7 +5524,7 @@ define @test_vlseg8ff_nxv2f16(half* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg8ff_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vlseg8e16ff.v v7, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5568,7 +5568,7 @@ define @test_vlseg2ff_nxv4f32(float* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vlseg2e32ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5606,7 +5606,7 @@ define @test_vlseg3ff_nxv4f32(float* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg3ff_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vlseg3e32ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) @@ -5645,7 +5645,7 @@ define @test_vlseg4ff_nxv4f32(float* %base, i64 %vl, i64* %outvl) { ; CHECK-LABEL: test_vlseg4ff_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vlseg4e32ff.v v6, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a2) diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll @@ -8,7 +8,7 @@ define @test_vlsseg2_nxv16i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1 ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret @@ -41,7 +41,7 @@ define @test_vlsseg2_nxv1i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -74,7 +74,7 @@ define @test_vlsseg3_nxv1i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -108,7 +108,7 @@ define @test_vlsseg4_nxv1i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -143,7 +143,7 @@ define @test_vlsseg5_nxv1i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -179,7 +179,7 @@ define @test_vlsseg6_nxv1i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -216,7 +216,7 @@ define @test_vlsseg7_nxv1i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -254,7 +254,7 @@ define @test_vlsseg8_nxv1i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -293,7 +293,7 @@ define @test_vlsseg2_nxv16i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vlsseg2e8.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret @@ -326,7 +326,7 @@ define @test_vlsseg3_nxv16i8(i8* %base, i32 
%offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vlsseg3e8.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret @@ -360,7 +360,7 @@ define @test_vlsseg4_nxv16i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vlsseg4e8.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret @@ -395,7 +395,7 @@ define @test_vlsseg2_nxv2i32(i32* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -428,7 +428,7 @@ define @test_vlsseg3_nxv2i32(i32* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -462,7 +462,7 @@ define @test_vlsseg4_nxv2i32(i32* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -497,7 +497,7 @@ define @test_vlsseg5_nxv2i32(i32* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -533,7 +533,7 @@ define @test_vlsseg6_nxv2i32(i32* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -570,7 +570,7 @@ define @test_vlsseg7_nxv2i32(i32* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -608,7 +608,7 @@ define @test_vlsseg8_nxv2i32(i32* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -647,7 +647,7 @@ define @test_vlsseg2_nxv4i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vlsseg2e16.v 
v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -680,7 +680,7 @@ define @test_vlsseg3_nxv4i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -714,7 +714,7 @@ define @test_vlsseg4_nxv4i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -749,7 +749,7 @@ define @test_vlsseg5_nxv4i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -785,7 +785,7 @@ define @test_vlsseg6_nxv4i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -822,7 +822,7 @@ define @test_vlsseg7_nxv4i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -860,7 +860,7 @@ define @test_vlsseg8_nxv4i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -899,7 +899,7 @@ define @test_vlsseg2_nxv1i32(i32* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -932,7 +932,7 @@ define @test_vlsseg3_nxv1i32(i32* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -966,7 +966,7 @@ define @test_vlsseg4_nxv1i32(i32* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -1001,7 +1001,7 @@ define @test_vlsseg5_nxv1i32(i32* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1i32: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -1037,7 +1037,7 @@ define @test_vlsseg6_nxv1i32(i32* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -1074,7 +1074,7 @@ define @test_vlsseg7_nxv1i32(i32* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -1112,7 +1112,7 @@ define @test_vlsseg8_nxv1i32(i32* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -1151,7 +1151,7 @@ define @test_vlsseg2_nxv8i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret @@ -1184,7 +1184,7 @@ define @test_vlsseg3_nxv8i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret @@ -1218,7 +1218,7 @@ define @test_vlsseg4_nxv8i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret @@ -1253,7 +1253,7 @@ define @test_vlsseg2_nxv8i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -1286,7 +1286,7 @@ define @test_vlsseg3_nxv8i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -1320,7 +1320,7 @@ define @test_vlsseg4_nxv8i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed 
$v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -1355,7 +1355,7 @@ define @test_vlsseg5_nxv8i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -1391,7 +1391,7 @@ define @test_vlsseg6_nxv8i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -1428,7 +1428,7 @@ define @test_vlsseg7_nxv8i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -1466,7 +1466,7 @@ define @test_vlsseg8_nxv8i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -1505,7 +1505,7 @@ define @test_vlsseg2_nxv8i32(i32* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma ; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1 ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret @@ -1538,7 +1538,7 @@ define @test_vlsseg2_nxv4i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -1571,7 +1571,7 @@ define @test_vlsseg3_nxv4i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -1605,7 +1605,7 @@ define @test_vlsseg4_nxv4i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -1640,7 +1640,7 @@ define @test_vlsseg5_nxv4i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -1676,7 +1676,7 @@ define @test_vlsseg6_nxv4i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu 
+; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -1713,7 +1713,7 @@ define @test_vlsseg7_nxv4i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -1751,7 +1751,7 @@ define @test_vlsseg8_nxv4i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -1790,7 +1790,7 @@ define @test_vlsseg2_nxv1i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -1823,7 +1823,7 @@ define @test_vlsseg3_nxv1i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -1857,7 +1857,7 @@ define @test_vlsseg4_nxv1i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -1892,7 +1892,7 @@ define @test_vlsseg5_nxv1i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -1928,7 +1928,7 @@ define @test_vlsseg6_nxv1i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -1965,7 +1965,7 @@ define @test_vlsseg7_nxv1i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -2003,7 +2003,7 @@ define @test_vlsseg8_nxv1i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; 
CHECK-NEXT: ret @@ -2042,7 +2042,7 @@ define @test_vlsseg2_nxv32i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma ; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1 ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret @@ -2075,7 +2075,7 @@ define @test_vlsseg2_nxv2i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -2108,7 +2108,7 @@ define @test_vlsseg3_nxv2i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -2142,7 +2142,7 @@ define @test_vlsseg4_nxv2i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -2177,7 +2177,7 @@ define @test_vlsseg5_nxv2i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -2213,7 +2213,7 @@ define @test_vlsseg6_nxv2i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -2250,7 +2250,7 @@ define @test_vlsseg7_nxv2i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -2288,7 +2288,7 @@ define @test_vlsseg8_nxv2i8(i8* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -2327,7 +2327,7 @@ define @test_vlsseg2_nxv2i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -2360,7 +2360,7 @@ define @test_vlsseg3_nxv2i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli 
zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -2394,7 +2394,7 @@ define @test_vlsseg4_nxv2i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -2429,7 +2429,7 @@ define @test_vlsseg5_nxv2i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -2465,7 +2465,7 @@ define @test_vlsseg6_nxv2i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -2502,7 +2502,7 @@ define @test_vlsseg7_nxv2i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -2540,7 +2540,7 @@ define @test_vlsseg8_nxv2i16(i16* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -2579,7 +2579,7 @@ define @test_vlsseg2_nxv4i32(i32* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret @@ -2612,7 +2612,7 @@ define @test_vlsseg3_nxv4i32(i32* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret @@ -2646,7 +2646,7 @@ define @test_vlsseg4_nxv4i32(i32* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret @@ -2681,7 +2681,7 @@ define @test_vlsseg2_nxv16f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1 ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret @@ -2714,7 
+2714,7 @@ define @test_vlsseg2_nxv4f64(double* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1 ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret @@ -2747,7 +2747,7 @@ define @test_vlsseg2_nxv1f64(double* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -2780,7 +2780,7 @@ define @test_vlsseg3_nxv1f64(double* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -2814,7 +2814,7 @@ define @test_vlsseg4_nxv1f64(double* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -2849,7 +2849,7 @@ define @test_vlsseg5_nxv1f64(double* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -2885,7 +2885,7 @@ define @test_vlsseg6_nxv1f64(double* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -2922,7 +2922,7 @@ define @test_vlsseg7_nxv1f64(double* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -2960,7 +2960,7 @@ define @test_vlsseg8_nxv1f64(double* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -2999,7 +2999,7 @@ define @test_vlsseg2_nxv2f32(float* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -3032,7 +3032,7 @@ define @test_vlsseg3_nxv2f32(float* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, 
ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -3066,7 +3066,7 @@ define @test_vlsseg4_nxv2f32(float* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -3101,7 +3101,7 @@ define @test_vlsseg5_nxv2f32(float* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -3137,7 +3137,7 @@ define @test_vlsseg6_nxv2f32(float* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -3174,7 +3174,7 @@ define @test_vlsseg7_nxv2f32(float* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -3212,7 +3212,7 @@ define @test_vlsseg8_nxv2f32(float* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -3251,7 +3251,7 @@ define @test_vlsseg2_nxv1f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -3284,7 +3284,7 @@ define @test_vlsseg3_nxv1f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -3318,7 +3318,7 @@ define @test_vlsseg4_nxv1f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -3353,7 +3353,7 @@ define @test_vlsseg5_nxv1f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ 
-3389,7 +3389,7 @@ define @test_vlsseg6_nxv1f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -3426,7 +3426,7 @@ define @test_vlsseg7_nxv1f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -3464,7 +3464,7 @@ define @test_vlsseg8_nxv1f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -3503,7 +3503,7 @@ define @test_vlsseg2_nxv1f32(float* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -3536,7 +3536,7 @@ define @test_vlsseg3_nxv1f32(float* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -3570,7 +3570,7 @@ define @test_vlsseg4_nxv1f32(float* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -3605,7 +3605,7 @@ define @test_vlsseg5_nxv1f32(float* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -3641,7 +3641,7 @@ define @test_vlsseg6_nxv1f32(float* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -3678,7 +3678,7 @@ define @test_vlsseg7_nxv1f32(float* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -3716,7 +3716,7 @@ define @test_vlsseg8_nxv1f32(float* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1f32: ; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -3755,7 +3755,7 @@ define @test_vlsseg2_nxv8f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret @@ -3788,7 +3788,7 @@ define @test_vlsseg3_nxv8f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret @@ -3822,7 +3822,7 @@ define @test_vlsseg4_nxv8f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret @@ -3857,7 +3857,7 @@ define @test_vlsseg2_nxv8f32(float* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma ; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1 ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret @@ -3890,7 +3890,7 @@ define @test_vlsseg2_nxv2f64(double* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret @@ -3923,7 +3923,7 @@ define @test_vlsseg3_nxv2f64(double* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret @@ -3957,7 +3957,7 @@ define @test_vlsseg4_nxv2f64(double* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret @@ -3992,7 +3992,7 @@ define @test_vlsseg2_nxv4f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -4025,7 +4025,7 @@ define @test_vlsseg3_nxv4f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def 
$v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -4059,7 +4059,7 @@ define @test_vlsseg4_nxv4f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -4094,7 +4094,7 @@ define @test_vlsseg5_nxv4f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -4130,7 +4130,7 @@ define @test_vlsseg6_nxv4f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -4167,7 +4167,7 @@ define @test_vlsseg7_nxv4f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -4205,7 +4205,7 @@ define @test_vlsseg8_nxv4f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -4244,7 +4244,7 @@ define @test_vlsseg2_nxv2f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -4277,7 +4277,7 @@ define @test_vlsseg3_nxv2f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -4311,7 +4311,7 @@ define @test_vlsseg4_nxv2f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -4346,7 +4346,7 @@ define @test_vlsseg5_nxv2f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -4382,7 +4382,7 @@ define @test_vlsseg6_nxv2f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2f16: ; CHECK: 
# %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -4419,7 +4419,7 @@ define @test_vlsseg7_nxv2f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -4457,7 +4457,7 @@ define @test_vlsseg8_nxv2f16(half* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -4496,7 +4496,7 @@ define @test_vlsseg2_nxv4f32(float* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret @@ -4529,7 +4529,7 @@ define @test_vlsseg3_nxv4f32(float* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret @@ -4563,7 +4563,7 @@ define @test_vlsseg4_nxv4f32(float* %base, i32 %offset, i32 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll @@ -8,7 +8,7 @@ define @test_vlsseg2_nxv16i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1 ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret @@ -41,7 +41,7 @@ define @test_vlsseg2_nxv4i32(i32* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret @@ -74,7 +74,7 @@ define @test_vlsseg3_nxv4i32(i32* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret @@ -108,7 +108,7 @@ define @test_vlsseg4_nxv4i32(i32* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: 
test_vlsseg4_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret @@ -143,7 +143,7 @@ define @test_vlsseg2_nxv16i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vlsseg2e8.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret @@ -176,7 +176,7 @@ define @test_vlsseg3_nxv16i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vlsseg3e8.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret @@ -210,7 +210,7 @@ define @test_vlsseg4_nxv16i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vlsseg4e8.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret @@ -245,7 +245,7 @@ define @test_vlsseg2_nxv1i64(i64* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -278,7 +278,7 @@ define @test_vlsseg3_nxv1i64(i64* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -312,7 +312,7 @@ define @test_vlsseg4_nxv1i64(i64* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -347,7 +347,7 @@ define @test_vlsseg5_nxv1i64(i64* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -383,7 +383,7 @@ define @test_vlsseg6_nxv1i64(i64* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -420,7 +420,7 @@ define @test_vlsseg7_nxv1i64(i64* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 
killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -458,7 +458,7 @@ define @test_vlsseg8_nxv1i64(i64* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -497,7 +497,7 @@ define @test_vlsseg2_nxv1i32(i32* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -530,7 +530,7 @@ define @test_vlsseg3_nxv1i32(i32* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -564,7 +564,7 @@ define @test_vlsseg4_nxv1i32(i32* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -599,7 +599,7 @@ define @test_vlsseg5_nxv1i32(i32* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -635,7 +635,7 @@ define @test_vlsseg6_nxv1i32(i32* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -672,7 +672,7 @@ define @test_vlsseg7_nxv1i32(i32* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -710,7 +710,7 @@ define @test_vlsseg8_nxv1i32(i32* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -749,7 +749,7 @@ define @test_vlsseg2_nxv8i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret @@ -782,7 +782,7 @@ define @test_vlsseg3_nxv8i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv8i16: ; CHECK: # %bb.0: 
# %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret @@ -816,7 +816,7 @@ define @test_vlsseg4_nxv8i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret @@ -851,7 +851,7 @@ define @test_vlsseg2_nxv4i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -884,7 +884,7 @@ define @test_vlsseg3_nxv4i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -918,7 +918,7 @@ define @test_vlsseg4_nxv4i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -953,7 +953,7 @@ define @test_vlsseg5_nxv4i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -989,7 +989,7 @@ define @test_vlsseg6_nxv4i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -1026,7 +1026,7 @@ define @test_vlsseg7_nxv4i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -1064,7 +1064,7 @@ define @test_vlsseg8_nxv4i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -1103,7 +1103,7 @@ define @test_vlsseg2_nxv1i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ 
-1136,7 +1136,7 @@ define @test_vlsseg3_nxv1i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -1170,7 +1170,7 @@ define @test_vlsseg4_nxv1i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -1205,7 +1205,7 @@ define @test_vlsseg5_nxv1i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -1241,7 +1241,7 @@ define @test_vlsseg6_nxv1i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -1278,7 +1278,7 @@ define @test_vlsseg7_nxv1i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -1316,7 +1316,7 @@ define @test_vlsseg8_nxv1i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -1355,7 +1355,7 @@ define @test_vlsseg2_nxv2i32(i32* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -1388,7 +1388,7 @@ define @test_vlsseg3_nxv2i32(i32* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -1422,7 +1422,7 @@ define @test_vlsseg4_nxv2i32(i32* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -1457,7 +1457,7 @@ define @test_vlsseg5_nxv2i32(i32* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu 
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -1493,7 +1493,7 @@ define @test_vlsseg6_nxv2i32(i32* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -1530,7 +1530,7 @@ define @test_vlsseg7_nxv2i32(i32* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -1568,7 +1568,7 @@ define @test_vlsseg8_nxv2i32(i32* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -1607,7 +1607,7 @@ define @test_vlsseg2_nxv8i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -1640,7 +1640,7 @@ define @test_vlsseg3_nxv8i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -1674,7 +1674,7 @@ define @test_vlsseg4_nxv8i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -1709,7 +1709,7 @@ define @test_vlsseg5_nxv8i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -1745,7 +1745,7 @@ define @test_vlsseg6_nxv8i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -1782,7 +1782,7 @@ define @test_vlsseg7_nxv8i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -1820,7 +1820,7 @@ define 
@test_vlsseg8_nxv8i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -1859,7 +1859,7 @@ define @test_vlsseg2_nxv4i64(i64* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1 ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret @@ -1892,7 +1892,7 @@ define @test_vlsseg2_nxv4i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -1925,7 +1925,7 @@ define @test_vlsseg3_nxv4i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -1959,7 +1959,7 @@ define @test_vlsseg4_nxv4i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -1994,7 +1994,7 @@ define @test_vlsseg5_nxv4i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -2030,7 +2030,7 @@ define @test_vlsseg6_nxv4i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -2067,7 +2067,7 @@ define @test_vlsseg7_nxv4i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -2105,7 +2105,7 @@ define @test_vlsseg8_nxv4i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -2144,7 +2144,7 @@ define @test_vlsseg2_nxv1i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli 
zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -2177,7 +2177,7 @@ define @test_vlsseg3_nxv1i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -2211,7 +2211,7 @@ define @test_vlsseg4_nxv1i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -2246,7 +2246,7 @@ define @test_vlsseg5_nxv1i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -2282,7 +2282,7 @@ define @test_vlsseg6_nxv1i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -2319,7 +2319,7 @@ define @test_vlsseg7_nxv1i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -2357,7 +2357,7 @@ define @test_vlsseg8_nxv1i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -2396,7 +2396,7 @@ define @test_vlsseg2_nxv2i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -2429,7 +2429,7 @@ define @test_vlsseg3_nxv2i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -2463,7 +2463,7 @@ define @test_vlsseg4_nxv2i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -2498,7 +2498,7 @@ define @test_vlsseg5_nxv2i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: 
test_vlsseg5_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -2534,7 +2534,7 @@ define @test_vlsseg6_nxv2i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -2571,7 +2571,7 @@ define @test_vlsseg7_nxv2i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -2609,7 +2609,7 @@ define @test_vlsseg8_nxv2i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -2648,7 +2648,7 @@ define @test_vlsseg2_nxv8i32(i32* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma ; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1 ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret @@ -2681,7 +2681,7 @@ define @test_vlsseg2_nxv32i8(i8* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma ; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1 ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret @@ -2714,7 +2714,7 @@ define @test_vlsseg2_nxv2i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -2747,7 +2747,7 @@ define @test_vlsseg3_nxv2i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -2781,7 +2781,7 @@ define @test_vlsseg4_nxv2i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -2816,7 +2816,7 @@ define @test_vlsseg5_nxv2i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def 
$v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -2852,7 +2852,7 @@ define @test_vlsseg6_nxv2i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -2889,7 +2889,7 @@ define @test_vlsseg7_nxv2i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -2927,7 +2927,7 @@ define @test_vlsseg8_nxv2i16(i16* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -2966,7 +2966,7 @@ define @test_vlsseg2_nxv2i64(i64* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret @@ -2999,7 +2999,7 @@ define @test_vlsseg3_nxv2i64(i64* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret @@ -3033,7 +3033,7 @@ define @test_vlsseg4_nxv2i64(i64* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret @@ -3068,7 +3068,7 @@ define @test_vlsseg2_nxv16f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1 ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret @@ -3101,7 +3101,7 @@ define @test_vlsseg2_nxv4f64(double* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1 ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret @@ -3134,7 +3134,7 @@ define @test_vlsseg2_nxv1f64(double* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -3167,7 +3167,7 @@ define @test_vlsseg3_nxv1f64(double* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: 
test_vlsseg3_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -3201,7 +3201,7 @@ define @test_vlsseg4_nxv1f64(double* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -3236,7 +3236,7 @@ define @test_vlsseg5_nxv1f64(double* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -3272,7 +3272,7 @@ define @test_vlsseg6_nxv1f64(double* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -3309,7 +3309,7 @@ define @test_vlsseg7_nxv1f64(double* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -3347,7 +3347,7 @@ define @test_vlsseg8_nxv1f64(double* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -3386,7 +3386,7 @@ define @test_vlsseg2_nxv2f32(float* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -3419,7 +3419,7 @@ define @test_vlsseg3_nxv2f32(float* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -3453,7 +3453,7 @@ define @test_vlsseg4_nxv2f32(float* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -3488,7 +3488,7 @@ define @test_vlsseg5_nxv2f32(float* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vlsseg5e32.v v7, (a0), 
a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -3524,7 +3524,7 @@ define @test_vlsseg6_nxv2f32(float* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -3561,7 +3561,7 @@ define @test_vlsseg7_nxv2f32(float* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -3599,7 +3599,7 @@ define @test_vlsseg8_nxv2f32(float* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -3638,7 +3638,7 @@ define @test_vlsseg2_nxv1f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -3671,7 +3671,7 @@ define @test_vlsseg3_nxv1f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -3705,7 +3705,7 @@ define @test_vlsseg4_nxv1f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -3740,7 +3740,7 @@ define @test_vlsseg5_nxv1f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -3776,7 +3776,7 @@ define @test_vlsseg6_nxv1f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -3813,7 +3813,7 @@ define @test_vlsseg7_nxv1f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -3851,7 +3851,7 @@ define @test_vlsseg8_nxv1f16(half* %base, i64 
%offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -3890,7 +3890,7 @@ define @test_vlsseg2_nxv1f32(float* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -3923,7 +3923,7 @@ define @test_vlsseg3_nxv1f32(float* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -3957,7 +3957,7 @@ define @test_vlsseg4_nxv1f32(float* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -3992,7 +3992,7 @@ define @test_vlsseg5_nxv1f32(float* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -4028,7 +4028,7 @@ define @test_vlsseg6_nxv1f32(float* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -4065,7 +4065,7 @@ define @test_vlsseg7_nxv1f32(float* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -4103,7 +4103,7 @@ define @test_vlsseg8_nxv1f32(float* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -4142,7 +4142,7 @@ define @test_vlsseg2_nxv8f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret @@ -4175,7 +4175,7 @@ define @test_vlsseg3_nxv8f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli 
zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret @@ -4209,7 +4209,7 @@ define @test_vlsseg4_nxv8f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret @@ -4244,7 +4244,7 @@ define @test_vlsseg2_nxv8f32(float* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma ; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1 ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 ; CHECK-NEXT: ret @@ -4277,7 +4277,7 @@ define @test_vlsseg2_nxv2f64(double* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret @@ -4310,7 +4310,7 @@ define @test_vlsseg3_nxv2f64(double* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret @@ -4344,7 +4344,7 @@ define @test_vlsseg4_nxv2f64(double* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret @@ -4379,7 +4379,7 @@ define @test_vlsseg2_nxv4f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -4412,7 +4412,7 @@ define @test_vlsseg3_nxv4f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -4446,7 +4446,7 @@ define @test_vlsseg4_nxv4f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -4481,7 +4481,7 @@ define @test_vlsseg5_nxv4f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -4517,7 +4517,7 @@ define 
@test_vlsseg6_nxv4f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -4554,7 +4554,7 @@ define @test_vlsseg7_nxv4f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -4592,7 +4592,7 @@ define @test_vlsseg8_nxv4f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -4631,7 +4631,7 @@ define @test_vlsseg2_nxv2f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 ; CHECK-NEXT: ret @@ -4664,7 +4664,7 @@ define @test_vlsseg3_nxv2f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 ; CHECK-NEXT: ret @@ -4698,7 +4698,7 @@ define @test_vlsseg4_nxv2f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 ; CHECK-NEXT: ret @@ -4733,7 +4733,7 @@ define @test_vlsseg5_nxv2f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg5_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 ; CHECK-NEXT: ret @@ -4769,7 +4769,7 @@ define @test_vlsseg6_nxv2f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg6_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 ; CHECK-NEXT: ret @@ -4806,7 +4806,7 @@ define @test_vlsseg7_nxv2f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg7_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 ; CHECK-NEXT: ret @@ -4844,7 +4844,7 @@ define @test_vlsseg8_nxv2f16(half* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg8_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, 
mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 ; CHECK-NEXT: ret @@ -4883,7 +4883,7 @@ define @test_vlsseg2_nxv4f32(float* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg2_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 ; CHECK-NEXT: ret @@ -4916,7 +4916,7 @@ define @test_vlsseg3_nxv4f32(float* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg3_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 ; CHECK-NEXT: ret @@ -4950,7 +4950,7 @@ define @test_vlsseg4_nxv4f32(float* %base, i64 %offset, i64 %vl) { ; CHECK-LABEL: test_vlsseg4_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1 ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll @@ -13,7 +13,7 @@ define @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -61,7 +61,7 @@ define @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -109,7 +109,7 @@ define @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v12 ; CHECK-NEXT: ret @@ -157,7 +157,7 @@ define @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxei64.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -205,7 +205,7 @@ define @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -253,7 +253,7 @@ define @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -301,7 +301,7 @@ define @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxei64.v v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -349,7 +349,7 @@ define @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxei64.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -397,7 +397,7 @@ define @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -445,7 +445,7 @@ define @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxei64.v v10, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -493,7 +493,7 @@ define @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxei64.v v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -541,7 +541,7 @@ define @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vluxei64.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -589,7 +589,7 @@ define @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxei64.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -636,7 +636,7 @@ define @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxei64.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -683,7 +683,7 @@ define @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vluxei64.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -730,7 +730,7 @@ define @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i64(* %0, %1, i64 %2) nounwind { ; 
CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vluxei64.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -777,7 +777,7 @@ define @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -825,7 +825,7 @@ define @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -873,7 +873,7 @@ define @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxei64.v v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -921,7 +921,7 @@ define @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxei64.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -969,7 +969,7 @@ define @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1017,7 +1017,7 @@ define @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxei64.v v10, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1065,7 +1065,7 @@ define @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxei64.v v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1113,7 +1113,7 @@ define @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vluxei64.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1161,7 +1161,7 @@ define @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxei64.v v8, (a0), v8 ; 
CHECK-NEXT: ret entry: @@ -1208,7 +1208,7 @@ define @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxei64.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -1255,7 +1255,7 @@ define @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vluxei64.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -1302,7 +1302,7 @@ define @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i64(* %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vluxei64.v v8, (a0), v8 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxei.ll b/llvm/test/CodeGen/RISCV/rvv/vluxei.ll --- a/llvm/test/CodeGen/RISCV/rvv/vluxei.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vluxei.ll @@ -13,7 +13,7 @@ define @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -61,7 +61,7 @@ define @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -109,7 +109,7 @@ define @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -157,7 +157,7 @@ define @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxei32.v v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -205,7 +205,7 @@ define @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vluxei32.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -253,7 +253,7 @@ define @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -301,7 +301,7 @@ define @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32(* %0, %1, iXLen %2) 
nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -349,7 +349,7 @@ define @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxei32.v v10, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -397,7 +397,7 @@ define @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxei32.v v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -445,7 +445,7 @@ define @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vluxei32.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -493,7 +493,7 @@ define @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxei32.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -540,7 +540,7 @@ define @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxei32.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -587,7 +587,7 @@ define @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxei32.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -634,7 +634,7 @@ define @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vluxei32.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -681,7 +681,7 @@ define @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vluxei32.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -728,7 +728,7 @@ define @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxei32.v v9, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -776,7 +776,7 @@ 
define @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxei32.v v10, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -824,7 +824,7 @@ define @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vluxei32.v v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -872,7 +872,7 @@ define @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vluxei32.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -920,7 +920,7 @@ define @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -968,7 +968,7 @@ define @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1016,7 +1016,7 @@ define @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxei32.v v10, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1064,7 +1064,7 @@ define @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxei32.v v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1112,7 +1112,7 @@ define @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vluxei32.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1160,7 +1160,7 @@ define @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxei32.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -1207,7 +1207,7 @@ define @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, 
e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxei32.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -1254,7 +1254,7 @@ define @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxei32.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -1301,7 +1301,7 @@ define @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vluxei32.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -1348,7 +1348,7 @@ define @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vluxei32.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -1395,7 +1395,7 @@ define @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxei32.v v9, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -1443,7 +1443,7 @@ define @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxei32.v v10, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1491,7 +1491,7 @@ define @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vluxei32.v v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1539,7 +1539,7 @@ define @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vluxei32.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1587,7 +1587,7 @@ define @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1635,7 +1635,7 @@ define @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1683,7 +1683,7 @@ define @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1731,7 +1731,7 @@ define @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxei16.v v10, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1779,7 +1779,7 @@ define @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vluxei16.v v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1827,7 +1827,7 @@ define @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vluxei16.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1875,7 +1875,7 @@ define @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxei16.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -1922,7 +1922,7 @@ define @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxei16.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -1969,7 +1969,7 @@ define @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxei16.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -2016,7 +2016,7 @@ define @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxei16.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -2063,7 +2063,7 @@ define @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vluxei16.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -2110,7 +2110,7 @@ define @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vluxei16.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -2157,7 +2157,7 @@ define 
@intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -2205,7 +2205,7 @@ define @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxei16.v v9, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -2253,7 +2253,7 @@ define @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxei16.v v10, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -2301,7 +2301,7 @@ define @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vluxei16.v v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -2349,7 +2349,7 @@ define @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vluxei16.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -2397,7 +2397,7 @@ define @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxei16.v v9, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -2445,7 +2445,7 @@ define @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxei16.v v10, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -2493,7 +2493,7 @@ define @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vluxei16.v v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -2541,7 +2541,7 @@ define @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vluxei16.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -2589,7 +2589,7 @@ define @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxei16.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -2636,7 +2636,7 @@ define @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxei16.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -2683,7 +2683,7 @@ define @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxei16.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -2730,7 +2730,7 @@ define @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxei16.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -2777,7 +2777,7 @@ define @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vluxei16.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -2824,7 +2824,7 @@ define @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vluxei16.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -2871,7 +2871,7 @@ define @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -2919,7 +2919,7 @@ define @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxei16.v v9, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -2967,7 +2967,7 @@ define @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxei16.v v10, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -3015,7 +3015,7 @@ define @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vluxei16.v v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -3063,7 +3063,7 @@ define @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vluxei16.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -3111,7 +3111,7 @@ define @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxei16.v v9, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -3159,7 +3159,7 @@ define @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxei16.v v10, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -3207,7 +3207,7 @@ define @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vluxei16.v v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -3255,7 +3255,7 @@ define @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vluxei16.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -3303,7 +3303,7 @@ define @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxei8.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -3350,7 +3350,7 @@ define @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxei8.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -3397,7 +3397,7 @@ define @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxei8.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -3444,7 +3444,7 @@ define @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxei8.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -3491,7 +3491,7 @@ define @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vluxei8.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -3538,7 +3538,7 @@ define @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8(* 
%0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vluxei8.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -3585,7 +3585,7 @@ define @intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vluxei8.v v8, (a0), v8 ; CHECK-NEXT: ret entry: @@ -3632,7 +3632,7 @@ define @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -3680,7 +3680,7 @@ define @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -3728,7 +3728,7 @@ define @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxei8.v v9, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -3776,7 +3776,7 @@ define @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxei8.v v10, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -3824,7 +3824,7 @@ define @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vluxei8.v v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -3872,7 +3872,7 @@ define @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vluxei8.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -3920,7 +3920,7 @@ define @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -3968,7 +3968,7 @@ define @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxei8.v v9, (a0), 
v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -4016,7 +4016,7 @@ define @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxei8.v v10, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -4064,7 +4064,7 @@ define @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vluxei8.v v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -4112,7 +4112,7 @@ define @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vluxei8.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -4160,7 +4160,7 @@ define @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxei8.v v9, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -4208,7 +4208,7 @@ define @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxei8.v v10, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -4256,7 +4256,7 @@ define @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vluxei8.v v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -4304,7 +4304,7 @@ define @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vluxei8.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -4352,7 +4352,7 @@ define @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -4400,7 +4400,7 @@ define @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -4448,7 +4448,7 @@ define @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxei8.v v9, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -4496,7 +4496,7 @@ define @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxei8.v v10, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -4544,7 +4544,7 @@ define @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vluxei8.v v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -4592,7 +4592,7 @@ define @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vluxei8.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -4640,7 +4640,7 @@ define @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -4688,7 +4688,7 @@ define @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxei8.v v9, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -4736,7 +4736,7 @@ define @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxei8.v v10, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -4784,7 +4784,7 @@ define @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vluxei8.v v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -4832,7 +4832,7 @@ define @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vluxei8.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -4880,7 +4880,7 @@ define @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxei8.v 
v9, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -4928,7 +4928,7 @@ define @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxei8.v v10, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -4976,7 +4976,7 @@ define @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vluxei8.v v12, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -5024,7 +5024,7 @@ define @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8(* %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vluxei8.v v16, (a0), v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll @@ -8,7 +8,7 @@ define @test_vluxseg2_nxv16i16_nxv16i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -38,7 +38,7 @@ define @test_vluxseg2_nxv16i16_nxv16i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -68,7 +68,7 @@ define @test_vluxseg2_nxv16i16_nxv16i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret @@ -98,7 +98,7 @@ define @test_vluxseg2_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -128,7 +128,7 @@ define @test_vluxseg2_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -158,7 +158,7 @@ define @test_vluxseg2_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -188,7 +188,7 @@ define @test_vluxseg3_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { 
; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -220,7 +220,7 @@ define @test_vluxseg3_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -252,7 +252,7 @@ define @test_vluxseg3_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -284,7 +284,7 @@ define @test_vluxseg4_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -317,7 +317,7 @@ define @test_vluxseg4_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -350,7 +350,7 @@ define @test_vluxseg4_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -383,7 +383,7 @@ define @test_vluxseg5_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -417,7 +417,7 @@ define @test_vluxseg5_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -451,7 +451,7 @@ define @test_vluxseg5_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -485,7 +485,7 @@ define @test_vluxseg6_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -520,7 +520,7 @@ define @test_vluxseg6_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -555,7 +555,7 @@ define @test_vluxseg6_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -590,7 +590,7 @@ define @test_vluxseg7_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -626,7 +626,7 @@ define @test_vluxseg7_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -662,7 +662,7 @@ define @test_vluxseg7_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -698,7 +698,7 @@ define @test_vluxseg8_nxv1i8_nxv1i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -735,7 +735,7 @@ define @test_vluxseg8_nxv1i8_nxv1i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -772,7 +772,7 @@ define @test_vluxseg8_nxv1i8_nxv1i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -809,7 +809,7 @@ define @test_vluxseg2_nxv16i8_nxv16i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -839,7 +839,7 @@ define @test_vluxseg2_nxv16i8_nxv16i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -869,7 +869,7 @@ define @test_vluxseg2_nxv16i8_nxv16i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; 
CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret @@ -899,7 +899,7 @@ define @test_vluxseg3_nxv16i8_nxv16i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -930,7 +930,7 @@ define @test_vluxseg3_nxv16i8_nxv16i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -962,7 +962,7 @@ define @test_vluxseg3_nxv16i8_nxv16i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v16, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret @@ -993,7 +993,7 @@ define @test_vluxseg4_nxv16i8_nxv16i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -1026,7 +1026,7 @@ define @test_vluxseg4_nxv16i8_nxv16i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -1059,7 +1059,7 @@ define @test_vluxseg4_nxv16i8_nxv16i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v16, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret @@ -1091,7 +1091,7 @@ define @test_vluxseg2_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1121,7 +1121,7 @@ define @test_vluxseg2_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1151,7 +1151,7 @@ define @test_vluxseg2_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1181,7 +1181,7 @@ define @test_vluxseg3_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; 
CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1213,7 +1213,7 @@ define @test_vluxseg3_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1245,7 +1245,7 @@ define @test_vluxseg3_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1277,7 +1277,7 @@ define @test_vluxseg4_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1310,7 +1310,7 @@ define @test_vluxseg4_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1343,7 +1343,7 @@ define @test_vluxseg4_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1376,7 +1376,7 @@ define @test_vluxseg5_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1410,7 +1410,7 @@ define @test_vluxseg5_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1444,7 +1444,7 @@ define @test_vluxseg5_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1478,7 +1478,7 @@ define @test_vluxseg6_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1513,7 +1513,7 @@ define @test_vluxseg6_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret 
@@ -1548,7 +1548,7 @@ define @test_vluxseg6_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1583,7 +1583,7 @@ define @test_vluxseg7_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1619,7 +1619,7 @@ define @test_vluxseg7_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1655,7 +1655,7 @@ define @test_vluxseg7_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1691,7 +1691,7 @@ define @test_vluxseg8_nxv2i32_nxv2i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1728,7 +1728,7 @@ define @test_vluxseg8_nxv2i32_nxv2i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1765,7 +1765,7 @@ define @test_vluxseg8_nxv2i32_nxv2i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1802,7 +1802,7 @@ define @test_vluxseg2_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1832,7 +1832,7 @@ define @test_vluxseg2_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1862,7 +1862,7 @@ define @test_vluxseg2_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -1892,7 +1892,7 @@ define 
@test_vluxseg3_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1924,7 +1924,7 @@ define @test_vluxseg3_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1956,7 +1956,7 @@ define @test_vluxseg3_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -1987,7 +1987,7 @@ define @test_vluxseg4_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2020,7 +2020,7 @@ define @test_vluxseg4_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2053,7 +2053,7 @@ define @test_vluxseg4_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -2086,7 +2086,7 @@ define @test_vluxseg5_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2120,7 +2120,7 @@ define @test_vluxseg5_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2154,7 +2154,7 @@ define @test_vluxseg5_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -2188,7 +2188,7 @@ define @test_vluxseg6_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2223,7 +2223,7 @@ define @test_vluxseg6_nxv4i16_nxv4i8(i16* 
%base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2258,7 +2258,7 @@ define @test_vluxseg6_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -2293,7 +2293,7 @@ define @test_vluxseg7_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2329,7 +2329,7 @@ define @test_vluxseg7_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2365,7 +2365,7 @@ define @test_vluxseg7_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -2401,7 +2401,7 @@ define @test_vluxseg8_nxv4i16_nxv4i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2438,7 +2438,7 @@ define @test_vluxseg8_nxv4i16_nxv4i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2475,7 +2475,7 @@ define @test_vluxseg8_nxv4i16_nxv4i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -2512,7 +2512,7 @@ define @test_vluxseg2_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2542,7 +2542,7 @@ define @test_vluxseg2_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2572,7 +2572,7 @@ define @test_vluxseg2_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { ; 
CHECK-LABEL: test_vluxseg2_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2602,7 +2602,7 @@ define @test_vluxseg3_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2634,7 +2634,7 @@ define @test_vluxseg3_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2666,7 +2666,7 @@ define @test_vluxseg3_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2698,7 +2698,7 @@ define @test_vluxseg4_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2731,7 +2731,7 @@ define @test_vluxseg4_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2764,7 +2764,7 @@ define @test_vluxseg4_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2797,7 +2797,7 @@ define @test_vluxseg5_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2831,7 +2831,7 @@ define @test_vluxseg5_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2865,7 +2865,7 @@ define @test_vluxseg5_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2899,7 +2899,7 @@ define @test_vluxseg6_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg6_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2934,7 +2934,7 @@ define @test_vluxseg6_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2969,7 +2969,7 @@ define @test_vluxseg6_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3004,7 +3004,7 @@ define @test_vluxseg7_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3040,7 +3040,7 @@ define @test_vluxseg7_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3076,7 +3076,7 @@ define @test_vluxseg7_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3112,7 +3112,7 @@ define @test_vluxseg8_nxv1i32_nxv1i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3149,7 +3149,7 @@ define @test_vluxseg8_nxv1i32_nxv1i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3186,7 +3186,7 @@ define @test_vluxseg8_nxv1i32_nxv1i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3223,7 +3223,7 @@ define @test_vluxseg2_nxv8i16_nxv8i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -3253,7 +3253,7 @@ define @test_vluxseg2_nxv8i16_nxv8i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg2_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -3283,7 +3283,7 @@ define @test_vluxseg2_nxv8i16_nxv8i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -3313,7 +3313,7 @@ define @test_vluxseg3_nxv8i16_nxv8i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -3345,7 +3345,7 @@ define @test_vluxseg3_nxv8i16_nxv8i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -3377,7 +3377,7 @@ define @test_vluxseg3_nxv8i16_nxv8i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -3408,7 +3408,7 @@ define @test_vluxseg4_nxv8i16_nxv8i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -3441,7 +3441,7 @@ define @test_vluxseg4_nxv8i16_nxv8i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -3474,7 +3474,7 @@ define @test_vluxseg4_nxv8i16_nxv8i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -3507,7 +3507,7 @@ define @test_vluxseg2_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -3537,7 +3537,7 @@ define @test_vluxseg2_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3567,7 +3567,7 @@ define @test_vluxseg2_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv8i32: ; CHECK: # %bb.0: 
# %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -3597,7 +3597,7 @@ define @test_vluxseg3_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -3628,7 +3628,7 @@ define @test_vluxseg3_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3660,7 +3660,7 @@ define @test_vluxseg3_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -3691,7 +3691,7 @@ define @test_vluxseg4_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -3724,7 +3724,7 @@ define @test_vluxseg4_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3757,7 +3757,7 @@ define @test_vluxseg4_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -3789,7 +3789,7 @@ define @test_vluxseg5_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -3823,7 +3823,7 @@ define @test_vluxseg5_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3857,7 +3857,7 @@ define @test_vluxseg5_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg5ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -3890,7 +3890,7 @@ define @test_vluxseg6_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, 
a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -3925,7 +3925,7 @@ define @test_vluxseg6_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3960,7 +3960,7 @@ define @test_vluxseg6_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -3995,7 +3995,7 @@ define @test_vluxseg7_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -4031,7 +4031,7 @@ define @test_vluxseg7_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4067,7 +4067,7 @@ define @test_vluxseg7_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg7ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -4103,7 +4103,7 @@ define @test_vluxseg8_nxv8i8_nxv8i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -4140,7 +4140,7 @@ define @test_vluxseg8_nxv8i8_nxv8i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4177,7 +4177,7 @@ define @test_vluxseg8_nxv8i8_nxv8i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -4214,7 +4214,7 @@ define @test_vluxseg2_nxv8i32_nxv8i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -4244,7 +4244,7 @@ define @test_vluxseg2_nxv8i32_nxv8i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 ; CHECK-NEXT: 
vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -4274,7 +4274,7 @@ define @test_vluxseg2_nxv8i32_nxv8i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -4304,7 +4304,7 @@ define @test_vluxseg2_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4334,7 +4334,7 @@ define @test_vluxseg2_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4364,7 +4364,7 @@ define @test_vluxseg2_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -4394,7 +4394,7 @@ define @test_vluxseg3_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4426,7 +4426,7 @@ define @test_vluxseg3_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4458,7 +4458,7 @@ define @test_vluxseg3_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -4489,7 +4489,7 @@ define @test_vluxseg4_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4522,7 +4522,7 @@ define @test_vluxseg4_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4555,7 +4555,7 @@ define @test_vluxseg4_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -4588,7 +4588,7 @@ define 
@test_vluxseg5_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4622,7 +4622,7 @@ define @test_vluxseg5_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4656,7 +4656,7 @@ define @test_vluxseg5_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -4690,7 +4690,7 @@ define @test_vluxseg6_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4725,7 +4725,7 @@ define @test_vluxseg6_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4760,7 +4760,7 @@ define @test_vluxseg6_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -4795,7 +4795,7 @@ define @test_vluxseg7_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4831,7 +4831,7 @@ define @test_vluxseg7_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4867,7 +4867,7 @@ define @test_vluxseg7_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -4903,7 +4903,7 @@ define @test_vluxseg8_nxv4i8_nxv4i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4940,7 +4940,7 @@ define @test_vluxseg8_nxv4i8_nxv4i8(i8* %base, %index, i32 %vl) { ; 
CHECK-LABEL: test_vluxseg8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4977,7 +4977,7 @@ define @test_vluxseg8_nxv4i8_nxv4i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -5014,7 +5014,7 @@ define @test_vluxseg2_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5044,7 +5044,7 @@ define @test_vluxseg2_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5074,7 +5074,7 @@ define @test_vluxseg2_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5104,7 +5104,7 @@ define @test_vluxseg3_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5136,7 +5136,7 @@ define @test_vluxseg3_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5168,7 +5168,7 @@ define @test_vluxseg3_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5200,7 +5200,7 @@ define @test_vluxseg4_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5233,7 +5233,7 @@ define @test_vluxseg4_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5266,7 +5266,7 @@ define @test_vluxseg4_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5299,7 +5299,7 @@ define @test_vluxseg5_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5333,7 +5333,7 @@ define @test_vluxseg5_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5367,7 +5367,7 @@ define @test_vluxseg5_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5401,7 +5401,7 @@ define @test_vluxseg6_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5436,7 +5436,7 @@ define @test_vluxseg6_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5471,7 +5471,7 @@ define @test_vluxseg6_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5506,7 +5506,7 @@ define @test_vluxseg7_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5542,7 +5542,7 @@ define @test_vluxseg7_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5578,7 +5578,7 @@ define @test_vluxseg7_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5614,7 +5614,7 @@ define @test_vluxseg8_nxv1i16_nxv1i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: 
test_vluxseg8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5651,7 +5651,7 @@ define @test_vluxseg8_nxv1i16_nxv1i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5688,7 +5688,7 @@ define @test_vluxseg8_nxv1i16_nxv1i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5725,7 +5725,7 @@ define @test_vluxseg2_nxv32i8_nxv32i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v16, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret @@ -5755,7 +5755,7 @@ define @test_vluxseg2_nxv32i8_nxv32i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -5785,7 +5785,7 @@ define @test_vluxseg2_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5815,7 +5815,7 @@ define @test_vluxseg2_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5845,7 +5845,7 @@ define @test_vluxseg2_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5875,7 +5875,7 @@ define @test_vluxseg3_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5907,7 +5907,7 @@ define @test_vluxseg3_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5939,7 +5939,7 @@ define @test_vluxseg3_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5971,7 +5971,7 @@ define @test_vluxseg4_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6004,7 +6004,7 @@ define @test_vluxseg4_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6037,7 +6037,7 @@ define @test_vluxseg4_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6070,7 +6070,7 @@ define @test_vluxseg5_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6104,7 +6104,7 @@ define @test_vluxseg5_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6138,7 +6138,7 @@ define @test_vluxseg5_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6172,7 +6172,7 @@ define @test_vluxseg6_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6207,7 +6207,7 @@ define @test_vluxseg6_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6242,7 +6242,7 @@ define @test_vluxseg6_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6277,7 +6277,7 @@ define @test_vluxseg7_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli 
zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6313,7 +6313,7 @@ define @test_vluxseg7_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6349,7 +6349,7 @@ define @test_vluxseg7_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6385,7 +6385,7 @@ define @test_vluxseg8_nxv2i8_nxv2i32(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6422,7 +6422,7 @@ define @test_vluxseg8_nxv2i8_nxv2i8(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6459,7 +6459,7 @@ define @test_vluxseg8_nxv2i8_nxv2i16(i8* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6496,7 +6496,7 @@ define @test_vluxseg2_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6526,7 +6526,7 @@ define @test_vluxseg2_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6556,7 +6556,7 @@ define @test_vluxseg2_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6586,7 +6586,7 @@ define @test_vluxseg3_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6618,7 +6618,7 @@ define @test_vluxseg3_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: 
vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6650,7 +6650,7 @@ define @test_vluxseg3_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6682,7 +6682,7 @@ define @test_vluxseg4_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6715,7 +6715,7 @@ define @test_vluxseg4_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6748,7 +6748,7 @@ define @test_vluxseg4_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6781,7 +6781,7 @@ define @test_vluxseg5_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6815,7 +6815,7 @@ define @test_vluxseg5_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6849,7 +6849,7 @@ define @test_vluxseg5_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6883,7 +6883,7 @@ define @test_vluxseg6_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6918,7 +6918,7 @@ define @test_vluxseg6_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6953,7 +6953,7 @@ define @test_vluxseg6_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), 
v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6988,7 +6988,7 @@ define @test_vluxseg7_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7024,7 +7024,7 @@ define @test_vluxseg7_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7060,7 +7060,7 @@ define @test_vluxseg7_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7096,7 +7096,7 @@ define @test_vluxseg8_nxv2i16_nxv2i32(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7133,7 +7133,7 @@ define @test_vluxseg8_nxv2i16_nxv2i8(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7170,7 +7170,7 @@ define @test_vluxseg8_nxv2i16_nxv2i16(i16* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7207,7 +7207,7 @@ define @test_vluxseg2_nxv4i32_nxv4i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -7237,7 +7237,7 @@ define @test_vluxseg2_nxv4i32_nxv4i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -7267,7 +7267,7 @@ define @test_vluxseg2_nxv4i32_nxv4i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -7297,7 +7297,7 @@ define @test_vluxseg3_nxv4i32_nxv4i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v 
v8, v12 ; CHECK-NEXT: ret @@ -7329,7 +7329,7 @@ define @test_vluxseg3_nxv4i32_nxv4i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -7361,7 +7361,7 @@ define @test_vluxseg3_nxv4i32_nxv4i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -7393,7 +7393,7 @@ define @test_vluxseg4_nxv4i32_nxv4i16(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -7426,7 +7426,7 @@ define @test_vluxseg4_nxv4i32_nxv4i8(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -7459,7 +7459,7 @@ define @test_vluxseg4_nxv4i32_nxv4i32(i32* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -7492,7 +7492,7 @@ define @test_vluxseg2_nxv16f16_nxv16i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -7522,7 +7522,7 @@ define @test_vluxseg2_nxv16f16_nxv16i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -7552,7 +7552,7 @@ define @test_vluxseg2_nxv16f16_nxv16i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret @@ -7582,7 +7582,7 @@ define @test_vluxseg2_nxv4f64_nxv4i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -7612,7 +7612,7 @@ define @test_vluxseg2_nxv4f64_nxv4i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; 
CHECK-NEXT: ret @@ -7642,7 +7642,7 @@ define @test_vluxseg2_nxv4f64_nxv4i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -7672,7 +7672,7 @@ define @test_vluxseg2_nxv1f64_nxv1i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7702,7 +7702,7 @@ define @test_vluxseg2_nxv1f64_nxv1i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7732,7 +7732,7 @@ define @test_vluxseg2_nxv1f64_nxv1i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7762,7 +7762,7 @@ define @test_vluxseg3_nxv1f64_nxv1i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7794,7 +7794,7 @@ define @test_vluxseg3_nxv1f64_nxv1i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7826,7 +7826,7 @@ define @test_vluxseg3_nxv1f64_nxv1i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7858,7 +7858,7 @@ define @test_vluxseg4_nxv1f64_nxv1i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7891,7 +7891,7 @@ define @test_vluxseg4_nxv1f64_nxv1i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7924,7 +7924,7 @@ define @test_vluxseg4_nxv1f64_nxv1i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: 
ret @@ -7957,7 +7957,7 @@ define @test_vluxseg5_nxv1f64_nxv1i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7991,7 +7991,7 @@ define @test_vluxseg5_nxv1f64_nxv1i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8025,7 +8025,7 @@ define @test_vluxseg5_nxv1f64_nxv1i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8059,7 +8059,7 @@ define @test_vluxseg6_nxv1f64_nxv1i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8094,7 +8094,7 @@ define @test_vluxseg6_nxv1f64_nxv1i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8129,7 +8129,7 @@ define @test_vluxseg6_nxv1f64_nxv1i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8164,7 +8164,7 @@ define @test_vluxseg7_nxv1f64_nxv1i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8200,7 +8200,7 @@ define @test_vluxseg7_nxv1f64_nxv1i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8236,7 +8236,7 @@ define @test_vluxseg7_nxv1f64_nxv1i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8272,7 +8272,7 @@ define @test_vluxseg8_nxv1f64_nxv1i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8309,7 
+8309,7 @@ define @test_vluxseg8_nxv1f64_nxv1i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8346,7 +8346,7 @@ define @test_vluxseg8_nxv1f64_nxv1i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8383,7 +8383,7 @@ define @test_vluxseg2_nxv2f32_nxv2i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8413,7 +8413,7 @@ define @test_vluxseg2_nxv2f32_nxv2i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8443,7 +8443,7 @@ define @test_vluxseg2_nxv2f32_nxv2i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8473,7 +8473,7 @@ define @test_vluxseg3_nxv2f32_nxv2i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8505,7 +8505,7 @@ define @test_vluxseg3_nxv2f32_nxv2i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8537,7 +8537,7 @@ define @test_vluxseg3_nxv2f32_nxv2i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8569,7 +8569,7 @@ define @test_vluxseg4_nxv2f32_nxv2i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8602,7 +8602,7 @@ define @test_vluxseg4_nxv2f32_nxv2i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8635,7 +8635,7 @@ define 
@test_vluxseg4_nxv2f32_nxv2i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8668,7 +8668,7 @@ define @test_vluxseg5_nxv2f32_nxv2i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8702,7 +8702,7 @@ define @test_vluxseg5_nxv2f32_nxv2i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8736,7 +8736,7 @@ define @test_vluxseg5_nxv2f32_nxv2i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8770,7 +8770,7 @@ define @test_vluxseg6_nxv2f32_nxv2i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8805,7 +8805,7 @@ define @test_vluxseg6_nxv2f32_nxv2i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8840,7 +8840,7 @@ define @test_vluxseg6_nxv2f32_nxv2i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8875,7 +8875,7 @@ define @test_vluxseg7_nxv2f32_nxv2i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8911,7 +8911,7 @@ define @test_vluxseg7_nxv2f32_nxv2i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8947,7 +8947,7 @@ define @test_vluxseg7_nxv2f32_nxv2i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8983,7 +8983,7 @@ define 
@test_vluxseg8_nxv2f32_nxv2i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9020,7 +9020,7 @@ define @test_vluxseg8_nxv2f32_nxv2i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9057,7 +9057,7 @@ define @test_vluxseg8_nxv2f32_nxv2i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9094,7 +9094,7 @@ define @test_vluxseg2_nxv1f16_nxv1i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9124,7 +9124,7 @@ define @test_vluxseg2_nxv1f16_nxv1i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9154,7 +9154,7 @@ define @test_vluxseg2_nxv1f16_nxv1i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9184,7 +9184,7 @@ define @test_vluxseg3_nxv1f16_nxv1i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9216,7 +9216,7 @@ define @test_vluxseg3_nxv1f16_nxv1i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9248,7 +9248,7 @@ define @test_vluxseg3_nxv1f16_nxv1i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9280,7 +9280,7 @@ define @test_vluxseg4_nxv1f16_nxv1i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9313,7 +9313,7 @@ define 
@test_vluxseg4_nxv1f16_nxv1i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9346,7 +9346,7 @@ define @test_vluxseg4_nxv1f16_nxv1i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9379,7 +9379,7 @@ define @test_vluxseg5_nxv1f16_nxv1i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9413,7 +9413,7 @@ define @test_vluxseg5_nxv1f16_nxv1i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9447,7 +9447,7 @@ define @test_vluxseg5_nxv1f16_nxv1i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9481,7 +9481,7 @@ define @test_vluxseg6_nxv1f16_nxv1i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9516,7 +9516,7 @@ define @test_vluxseg6_nxv1f16_nxv1i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9551,7 +9551,7 @@ define @test_vluxseg6_nxv1f16_nxv1i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9586,7 +9586,7 @@ define @test_vluxseg7_nxv1f16_nxv1i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9622,7 +9622,7 @@ define @test_vluxseg7_nxv1f16_nxv1i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9658,7 +9658,7 @@ define 
@test_vluxseg7_nxv1f16_nxv1i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9694,7 +9694,7 @@ define @test_vluxseg8_nxv1f16_nxv1i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9731,7 +9731,7 @@ define @test_vluxseg8_nxv1f16_nxv1i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9768,7 +9768,7 @@ define @test_vluxseg8_nxv1f16_nxv1i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9805,7 +9805,7 @@ define @test_vluxseg2_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9835,7 +9835,7 @@ define @test_vluxseg2_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9865,7 +9865,7 @@ define @test_vluxseg2_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9895,7 +9895,7 @@ define @test_vluxseg3_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9927,7 +9927,7 @@ define @test_vluxseg3_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9959,7 +9959,7 @@ define @test_vluxseg3_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9991,7 +9991,7 @@ define 
@test_vluxseg4_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10024,7 +10024,7 @@ define @test_vluxseg4_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10057,7 +10057,7 @@ define @test_vluxseg4_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10090,7 +10090,7 @@ define @test_vluxseg5_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10124,7 +10124,7 @@ define @test_vluxseg5_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10158,7 +10158,7 @@ define @test_vluxseg5_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10192,7 +10192,7 @@ define @test_vluxseg6_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10227,7 +10227,7 @@ define @test_vluxseg6_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10262,7 +10262,7 @@ define @test_vluxseg6_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10297,7 +10297,7 @@ define @test_vluxseg7_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10333,7 
+10333,7 @@ define @test_vluxseg7_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10369,7 +10369,7 @@ define @test_vluxseg7_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10405,7 +10405,7 @@ define @test_vluxseg8_nxv1f32_nxv1i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10442,7 +10442,7 @@ define @test_vluxseg8_nxv1f32_nxv1i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10479,7 +10479,7 @@ define @test_vluxseg8_nxv1f32_nxv1i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10516,7 +10516,7 @@ define @test_vluxseg2_nxv8f16_nxv8i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -10546,7 +10546,7 @@ define @test_vluxseg2_nxv8f16_nxv8i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -10576,7 +10576,7 @@ define @test_vluxseg2_nxv8f16_nxv8i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -10606,7 +10606,7 @@ define @test_vluxseg3_nxv8f16_nxv8i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -10638,7 +10638,7 @@ define @test_vluxseg3_nxv8f16_nxv8i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ 
-10670,7 +10670,7 @@ define @test_vluxseg3_nxv8f16_nxv8i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -10701,7 +10701,7 @@ define @test_vluxseg4_nxv8f16_nxv8i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -10734,7 +10734,7 @@ define @test_vluxseg4_nxv8f16_nxv8i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -10767,7 +10767,7 @@ define @test_vluxseg4_nxv8f16_nxv8i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -10800,7 +10800,7 @@ define @test_vluxseg2_nxv8f32_nxv8i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -10830,7 +10830,7 @@ define @test_vluxseg2_nxv8f32_nxv8i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -10860,7 +10860,7 @@ define @test_vluxseg2_nxv8f32_nxv8i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -10890,7 +10890,7 @@ define @test_vluxseg2_nxv2f64_nxv2i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -10920,7 +10920,7 @@ define @test_vluxseg2_nxv2f64_nxv2i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -10950,7 +10950,7 @@ define @test_vluxseg2_nxv2f64_nxv2i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: 
ret @@ -10980,7 +10980,7 @@ define @test_vluxseg3_nxv2f64_nxv2i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -11012,7 +11012,7 @@ define @test_vluxseg3_nxv2f64_nxv2i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -11044,7 +11044,7 @@ define @test_vluxseg3_nxv2f64_nxv2i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -11076,7 +11076,7 @@ define @test_vluxseg4_nxv2f64_nxv2i32(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -11109,7 +11109,7 @@ define @test_vluxseg4_nxv2f64_nxv2i8(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -11142,7 +11142,7 @@ define @test_vluxseg4_nxv2f64_nxv2i16(double* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -11175,7 +11175,7 @@ define @test_vluxseg2_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11205,7 +11205,7 @@ define @test_vluxseg2_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11235,7 +11235,7 @@ define @test_vluxseg2_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -11265,7 +11265,7 @@ define @test_vluxseg3_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; 
CHECK-NEXT: ret @@ -11297,7 +11297,7 @@ define @test_vluxseg3_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11329,7 +11329,7 @@ define @test_vluxseg3_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -11360,7 +11360,7 @@ define @test_vluxseg4_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11393,7 +11393,7 @@ define @test_vluxseg4_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11426,7 +11426,7 @@ define @test_vluxseg4_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -11459,7 +11459,7 @@ define @test_vluxseg5_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11493,7 +11493,7 @@ define @test_vluxseg5_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11527,7 +11527,7 @@ define @test_vluxseg5_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -11561,7 +11561,7 @@ define @test_vluxseg6_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11596,7 +11596,7 @@ define @test_vluxseg6_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: 
ret @@ -11631,7 +11631,7 @@ define @test_vluxseg6_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -11666,7 +11666,7 @@ define @test_vluxseg7_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11702,7 +11702,7 @@ define @test_vluxseg7_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11738,7 +11738,7 @@ define @test_vluxseg7_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -11774,7 +11774,7 @@ define @test_vluxseg8_nxv4f16_nxv4i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11811,7 +11811,7 @@ define @test_vluxseg8_nxv4f16_nxv4i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11848,7 +11848,7 @@ define @test_vluxseg8_nxv4f16_nxv4i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -11885,7 +11885,7 @@ define @test_vluxseg2_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11915,7 +11915,7 @@ define @test_vluxseg2_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11945,7 +11945,7 @@ define @test_vluxseg2_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret 
@@ -11975,7 +11975,7 @@ define @test_vluxseg3_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12007,7 +12007,7 @@ define @test_vluxseg3_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12039,7 +12039,7 @@ define @test_vluxseg3_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12071,7 +12071,7 @@ define @test_vluxseg4_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12104,7 +12104,7 @@ define @test_vluxseg4_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12137,7 +12137,7 @@ define @test_vluxseg4_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12170,7 +12170,7 @@ define @test_vluxseg5_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12204,7 +12204,7 @@ define @test_vluxseg5_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12238,7 +12238,7 @@ define @test_vluxseg5_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12272,7 +12272,7 @@ define @test_vluxseg6_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; 
CHECK-NEXT: ret @@ -12307,7 +12307,7 @@ define @test_vluxseg6_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12342,7 +12342,7 @@ define @test_vluxseg6_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12377,7 +12377,7 @@ define @test_vluxseg7_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12413,7 +12413,7 @@ define @test_vluxseg7_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12449,7 +12449,7 @@ define @test_vluxseg7_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12485,7 +12485,7 @@ define @test_vluxseg8_nxv2f16_nxv2i32(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12522,7 +12522,7 @@ define @test_vluxseg8_nxv2f16_nxv2i8(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12559,7 +12559,7 @@ define @test_vluxseg8_nxv2f16_nxv2i16(half* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12596,7 +12596,7 @@ define @test_vluxseg2_nxv4f32_nxv4i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -12626,7 +12626,7 @@ define @test_vluxseg2_nxv4f32_nxv4i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, 
v12 ; CHECK-NEXT: ret @@ -12656,7 +12656,7 @@ define @test_vluxseg2_nxv4f32_nxv4i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -12686,7 +12686,7 @@ define @test_vluxseg3_nxv4f32_nxv4i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -12718,7 +12718,7 @@ define @test_vluxseg3_nxv4f32_nxv4i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -12750,7 +12750,7 @@ define @test_vluxseg3_nxv4f32_nxv4i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -12782,7 +12782,7 @@ define @test_vluxseg4_nxv4f32_nxv4i16(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -12815,7 +12815,7 @@ define @test_vluxseg4_nxv4f32_nxv4i8(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -12848,7 +12848,7 @@ define @test_vluxseg4_nxv4f32_nxv4i32(float* %base, %index, i32 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll @@ -8,7 +8,7 @@ define @test_vluxseg2_nxv16i16_nxv16i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -38,7 +38,7 @@ define @test_vluxseg2_nxv16i16_nxv16i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -68,7 +68,7 @@ define @test_vluxseg2_nxv16i16_nxv16i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg2_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret @@ -98,7 +98,7 @@ define @test_vluxseg2_nxv4i32_nxv4i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -128,7 +128,7 @@ define @test_vluxseg2_nxv4i32_nxv4i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -158,7 +158,7 @@ define @test_vluxseg2_nxv4i32_nxv4i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -188,7 +188,7 @@ define @test_vluxseg2_nxv4i32_nxv4i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -218,7 +218,7 @@ define @test_vluxseg3_nxv4i32_nxv4i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -250,7 +250,7 @@ define @test_vluxseg3_nxv4i32_nxv4i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -282,7 +282,7 @@ define @test_vluxseg3_nxv4i32_nxv4i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg3ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -313,7 +313,7 @@ define @test_vluxseg3_nxv4i32_nxv4i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -345,7 +345,7 @@ define @test_vluxseg4_nxv4i32_nxv4i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -378,7 +378,7 @@ define @test_vluxseg4_nxv4i32_nxv4i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -411,7 +411,7 @@ define @test_vluxseg4_nxv4i32_nxv4i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -444,7 +444,7 @@ define @test_vluxseg4_nxv4i32_nxv4i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -477,7 +477,7 @@ define @test_vluxseg2_nxv16i8_nxv16i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -507,7 +507,7 @@ define @test_vluxseg2_nxv16i8_nxv16i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -537,7 +537,7 @@ define @test_vluxseg2_nxv16i8_nxv16i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret @@ -567,7 +567,7 @@ define @test_vluxseg3_nxv16i8_nxv16i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -598,7 +598,7 @@ define @test_vluxseg3_nxv16i8_nxv16i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -630,7 +630,7 @@ define @test_vluxseg3_nxv16i8_nxv16i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v16, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret @@ -661,7 +661,7 @@ define @test_vluxseg4_nxv16i8_nxv16i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -694,7 +694,7 @@ define @test_vluxseg4_nxv16i8_nxv16i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; 
CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -727,7 +727,7 @@ define @test_vluxseg4_nxv16i8_nxv16i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v16, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret @@ -759,7 +759,7 @@ define @test_vluxseg2_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -789,7 +789,7 @@ define @test_vluxseg2_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -819,7 +819,7 @@ define @test_vluxseg2_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -849,7 +849,7 @@ define @test_vluxseg2_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -879,7 +879,7 @@ define @test_vluxseg3_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -911,7 +911,7 @@ define @test_vluxseg3_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -943,7 +943,7 @@ define @test_vluxseg3_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -975,7 +975,7 @@ define @test_vluxseg3_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1007,7 +1007,7 @@ define @test_vluxseg4_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: 
vluxseg4ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1040,7 +1040,7 @@ define @test_vluxseg4_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1073,7 +1073,7 @@ define @test_vluxseg4_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1106,7 +1106,7 @@ define @test_vluxseg4_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1139,7 +1139,7 @@ define @test_vluxseg5_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1173,7 +1173,7 @@ define @test_vluxseg5_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1207,7 +1207,7 @@ define @test_vluxseg5_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1241,7 +1241,7 @@ define @test_vluxseg5_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1275,7 +1275,7 @@ define @test_vluxseg6_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1310,7 +1310,7 @@ define @test_vluxseg6_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1345,7 +1345,7 @@ define @test_vluxseg6_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; 
CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1380,7 +1380,7 @@ define @test_vluxseg6_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1415,7 +1415,7 @@ define @test_vluxseg7_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1451,7 +1451,7 @@ define @test_vluxseg7_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1487,7 +1487,7 @@ define @test_vluxseg7_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1523,7 +1523,7 @@ define @test_vluxseg7_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1559,7 +1559,7 @@ define @test_vluxseg8_nxv1i64_nxv1i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1596,7 +1596,7 @@ define @test_vluxseg8_nxv1i64_nxv1i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1633,7 +1633,7 @@ define @test_vluxseg8_nxv1i64_nxv1i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1670,7 +1670,7 @@ define @test_vluxseg8_nxv1i64_nxv1i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1707,7 +1707,7 @@ define @test_vluxseg2_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: 
ret @@ -1737,7 +1737,7 @@ define @test_vluxseg2_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1767,7 +1767,7 @@ define @test_vluxseg2_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1797,7 +1797,7 @@ define @test_vluxseg2_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1827,7 +1827,7 @@ define @test_vluxseg3_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1859,7 +1859,7 @@ define @test_vluxseg3_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1891,7 +1891,7 @@ define @test_vluxseg3_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1923,7 +1923,7 @@ define @test_vluxseg3_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1955,7 +1955,7 @@ define @test_vluxseg4_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1988,7 +1988,7 @@ define @test_vluxseg4_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2021,7 +2021,7 @@ define @test_vluxseg4_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2054,7 
+2054,7 @@ define @test_vluxseg4_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2087,7 +2087,7 @@ define @test_vluxseg5_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2121,7 +2121,7 @@ define @test_vluxseg5_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2155,7 +2155,7 @@ define @test_vluxseg5_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2189,7 +2189,7 @@ define @test_vluxseg5_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2223,7 +2223,7 @@ define @test_vluxseg6_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2258,7 +2258,7 @@ define @test_vluxseg6_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2293,7 +2293,7 @@ define @test_vluxseg6_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2328,7 +2328,7 @@ define @test_vluxseg6_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2363,7 +2363,7 @@ define @test_vluxseg7_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2399,7 +2399,7 @@ define 
@test_vluxseg7_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2435,7 +2435,7 @@ define @test_vluxseg7_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2471,7 +2471,7 @@ define @test_vluxseg7_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2507,7 +2507,7 @@ define @test_vluxseg8_nxv1i32_nxv1i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2544,7 +2544,7 @@ define @test_vluxseg8_nxv1i32_nxv1i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2581,7 +2581,7 @@ define @test_vluxseg8_nxv1i32_nxv1i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2618,7 +2618,7 @@ define @test_vluxseg8_nxv1i32_nxv1i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2655,7 +2655,7 @@ define @test_vluxseg2_nxv8i16_nxv8i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -2685,7 +2685,7 @@ define @test_vluxseg2_nxv8i16_nxv8i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -2715,7 +2715,7 @@ define @test_vluxseg2_nxv8i16_nxv8i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg2ei64.v v16, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret @@ -2745,7 +2745,7 @@ define 
@test_vluxseg2_nxv8i16_nxv8i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -2775,7 +2775,7 @@ define @test_vluxseg3_nxv8i16_nxv8i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -2807,7 +2807,7 @@ define @test_vluxseg3_nxv8i16_nxv8i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -2839,7 +2839,7 @@ define @test_vluxseg3_nxv8i16_nxv8i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg3ei64.v v16, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret @@ -2870,7 +2870,7 @@ define @test_vluxseg3_nxv8i16_nxv8i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -2901,7 +2901,7 @@ define @test_vluxseg4_nxv8i16_nxv8i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -2934,7 +2934,7 @@ define @test_vluxseg4_nxv8i16_nxv8i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -2967,7 +2967,7 @@ define @test_vluxseg4_nxv8i16_nxv8i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg4ei64.v v16, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret @@ -2999,7 +2999,7 @@ define @test_vluxseg4_nxv8i16_nxv8i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -3032,7 +3032,7 @@ define @test_vluxseg2_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -3062,7 +3062,7 @@ define @test_vluxseg2_nxv4i8_nxv4i8(i8* 
%base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3092,7 +3092,7 @@ define @test_vluxseg2_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -3122,7 +3122,7 @@ define @test_vluxseg2_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3152,7 +3152,7 @@ define @test_vluxseg3_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -3183,7 +3183,7 @@ define @test_vluxseg3_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3215,7 +3215,7 @@ define @test_vluxseg3_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg3ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -3246,7 +3246,7 @@ define @test_vluxseg3_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3278,7 +3278,7 @@ define @test_vluxseg4_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -3311,7 +3311,7 @@ define @test_vluxseg4_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3344,7 +3344,7 @@ define @test_vluxseg4_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -3376,7 +3376,7 @@ define @test_vluxseg4_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: 
test_vluxseg4_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3409,7 +3409,7 @@ define @test_vluxseg5_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -3443,7 +3443,7 @@ define @test_vluxseg5_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3477,7 +3477,7 @@ define @test_vluxseg5_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -3510,7 +3510,7 @@ define @test_vluxseg5_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3544,7 +3544,7 @@ define @test_vluxseg6_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -3579,7 +3579,7 @@ define @test_vluxseg6_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3614,7 +3614,7 @@ define @test_vluxseg6_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -3649,7 +3649,7 @@ define @test_vluxseg6_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3684,7 +3684,7 @@ define @test_vluxseg7_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -3720,7 +3720,7 @@ define @test_vluxseg7_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3756,7 +3756,7 @@ define @test_vluxseg7_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -3792,7 +3792,7 @@ define @test_vluxseg7_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3828,7 +3828,7 @@ define @test_vluxseg8_nxv4i8_nxv4i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -3865,7 +3865,7 @@ define @test_vluxseg8_nxv4i8_nxv4i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3902,7 +3902,7 @@ define @test_vluxseg8_nxv4i8_nxv4i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -3939,7 +3939,7 @@ define @test_vluxseg8_nxv4i8_nxv4i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -3976,7 +3976,7 @@ define @test_vluxseg2_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4006,7 +4006,7 @@ define @test_vluxseg2_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4036,7 +4036,7 @@ define @test_vluxseg2_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4066,7 +4066,7 @@ define @test_vluxseg2_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; 
CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4096,7 +4096,7 @@ define @test_vluxseg3_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4128,7 +4128,7 @@ define @test_vluxseg3_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4160,7 +4160,7 @@ define @test_vluxseg3_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4192,7 +4192,7 @@ define @test_vluxseg3_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4224,7 +4224,7 @@ define @test_vluxseg4_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4257,7 +4257,7 @@ define @test_vluxseg4_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4290,7 +4290,7 @@ define @test_vluxseg4_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4323,7 +4323,7 @@ define @test_vluxseg4_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4356,7 +4356,7 @@ define @test_vluxseg5_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4390,7 +4390,7 @@ define @test_vluxseg5_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli 
zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4424,7 +4424,7 @@ define @test_vluxseg5_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4458,7 +4458,7 @@ define @test_vluxseg5_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4492,7 +4492,7 @@ define @test_vluxseg6_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4527,7 +4527,7 @@ define @test_vluxseg6_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4562,7 +4562,7 @@ define @test_vluxseg6_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4597,7 +4597,7 @@ define @test_vluxseg6_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4632,7 +4632,7 @@ define @test_vluxseg7_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4668,7 +4668,7 @@ define @test_vluxseg7_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4704,7 +4704,7 @@ define @test_vluxseg7_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4740,7 +4740,7 @@ define @test_vluxseg7_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, 
ta, ma ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4776,7 +4776,7 @@ define @test_vluxseg8_nxv1i16_nxv1i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4813,7 +4813,7 @@ define @test_vluxseg8_nxv1i16_nxv1i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4850,7 +4850,7 @@ define @test_vluxseg8_nxv1i16_nxv1i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4887,7 +4887,7 @@ define @test_vluxseg8_nxv1i16_nxv1i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4924,7 +4924,7 @@ define @test_vluxseg2_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4954,7 +4954,7 @@ define @test_vluxseg2_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -4984,7 +4984,7 @@ define @test_vluxseg2_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5014,7 +5014,7 @@ define @test_vluxseg2_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -5044,7 +5044,7 @@ define @test_vluxseg3_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5076,7 +5076,7 @@ define @test_vluxseg3_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg3ei8.v 
v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5108,7 +5108,7 @@ define @test_vluxseg3_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5140,7 +5140,7 @@ define @test_vluxseg3_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -5171,7 +5171,7 @@ define @test_vluxseg4_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5204,7 +5204,7 @@ define @test_vluxseg4_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5237,7 +5237,7 @@ define @test_vluxseg4_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5270,7 +5270,7 @@ define @test_vluxseg4_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -5303,7 +5303,7 @@ define @test_vluxseg5_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5337,7 +5337,7 @@ define @test_vluxseg5_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5371,7 +5371,7 @@ define @test_vluxseg5_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5405,7 +5405,7 @@ define @test_vluxseg5_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v 
v8, v11 ; CHECK-NEXT: ret @@ -5439,7 +5439,7 @@ define @test_vluxseg6_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5474,7 +5474,7 @@ define @test_vluxseg6_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5509,7 +5509,7 @@ define @test_vluxseg6_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5544,7 +5544,7 @@ define @test_vluxseg6_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -5579,7 +5579,7 @@ define @test_vluxseg7_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5615,7 +5615,7 @@ define @test_vluxseg7_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5651,7 +5651,7 @@ define @test_vluxseg7_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5687,7 +5687,7 @@ define @test_vluxseg7_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -5723,7 +5723,7 @@ define @test_vluxseg8_nxv2i32_nxv2i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5760,7 +5760,7 @@ define @test_vluxseg8_nxv2i32_nxv2i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5797,7 
+5797,7 @@ define @test_vluxseg8_nxv2i32_nxv2i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5834,7 +5834,7 @@ define @test_vluxseg8_nxv2i32_nxv2i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -5871,7 +5871,7 @@ define @test_vluxseg2_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -5901,7 +5901,7 @@ define @test_vluxseg2_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -5931,7 +5931,7 @@ define @test_vluxseg2_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg2ei64.v v16, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret @@ -5961,7 +5961,7 @@ define @test_vluxseg2_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -5991,7 +5991,7 @@ define @test_vluxseg3_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -6022,7 +6022,7 @@ define @test_vluxseg3_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6054,7 +6054,7 @@ define @test_vluxseg3_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg3ei64.v v16, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret @@ -6085,7 +6085,7 @@ define @test_vluxseg3_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -6116,7 +6116,7 @@ define @test_vluxseg4_nxv8i8_nxv8i16(i8* %base, %index, i64 
%vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -6149,7 +6149,7 @@ define @test_vluxseg4_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6182,7 +6182,7 @@ define @test_vluxseg4_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg4ei64.v v16, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret @@ -6214,7 +6214,7 @@ define @test_vluxseg4_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -6246,7 +6246,7 @@ define @test_vluxseg5_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -6280,7 +6280,7 @@ define @test_vluxseg5_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6314,7 +6314,7 @@ define @test_vluxseg5_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg5ei64.v v16, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret @@ -6347,7 +6347,7 @@ define @test_vluxseg5_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg5ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -6380,7 +6380,7 @@ define @test_vluxseg6_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -6415,7 +6415,7 @@ define @test_vluxseg6_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6450,7 +6450,7 @@ define @test_vluxseg6_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg6ei64.v v16, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret @@ -6484,7 +6484,7 @@ define @test_vluxseg6_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -6519,7 +6519,7 @@ define @test_vluxseg7_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -6555,7 +6555,7 @@ define @test_vluxseg7_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6591,7 +6591,7 @@ define @test_vluxseg7_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg7ei64.v v16, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret @@ -6626,7 +6626,7 @@ define @test_vluxseg7_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg7ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -6662,7 +6662,7 @@ define @test_vluxseg8_nxv8i8_nxv8i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -6699,7 +6699,7 @@ define @test_vluxseg8_nxv8i8_nxv8i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6736,7 +6736,7 @@ define @test_vluxseg8_nxv8i8_nxv8i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg8ei64.v v16, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v17 ; CHECK-NEXT: ret @@ -6772,7 +6772,7 @@ define @test_vluxseg8_nxv8i8_nxv8i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -6809,7 +6809,7 @@ define @test_vluxseg2_nxv4i64_nxv4i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, 
e64, m4, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -6839,7 +6839,7 @@ define @test_vluxseg2_nxv4i64_nxv4i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -6869,7 +6869,7 @@ define @test_vluxseg2_nxv4i64_nxv4i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -6899,7 +6899,7 @@ define @test_vluxseg2_nxv4i64_nxv4i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -6929,7 +6929,7 @@ define @test_vluxseg2_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -6959,7 +6959,7 @@ define @test_vluxseg2_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -6989,7 +6989,7 @@ define @test_vluxseg2_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -7019,7 +7019,7 @@ define @test_vluxseg2_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7049,7 +7049,7 @@ define @test_vluxseg3_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -7080,7 +7080,7 @@ define @test_vluxseg3_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7112,7 +7112,7 @@ define @test_vluxseg3_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: 
vluxseg3ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -7143,7 +7143,7 @@ define @test_vluxseg3_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7175,7 +7175,7 @@ define @test_vluxseg4_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -7208,7 +7208,7 @@ define @test_vluxseg4_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7241,7 +7241,7 @@ define @test_vluxseg4_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -7273,7 +7273,7 @@ define @test_vluxseg4_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7306,7 +7306,7 @@ define @test_vluxseg5_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -7340,7 +7340,7 @@ define @test_vluxseg5_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7374,7 +7374,7 @@ define @test_vluxseg5_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -7407,7 +7407,7 @@ define @test_vluxseg5_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7441,7 +7441,7 @@ define @test_vluxseg6_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v8 ; 
CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -7476,7 +7476,7 @@ define @test_vluxseg6_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7511,7 +7511,7 @@ define @test_vluxseg6_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -7546,7 +7546,7 @@ define @test_vluxseg6_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7581,7 +7581,7 @@ define @test_vluxseg7_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -7617,7 +7617,7 @@ define @test_vluxseg7_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7653,7 +7653,7 @@ define @test_vluxseg7_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -7689,7 +7689,7 @@ define @test_vluxseg7_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7725,7 +7725,7 @@ define @test_vluxseg8_nxv4i16_nxv4i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -7762,7 +7762,7 @@ define @test_vluxseg8_nxv4i16_nxv4i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7799,7 +7799,7 @@ define @test_vluxseg8_nxv4i16_nxv4i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; 
CHECK-NEXT: ret @@ -7836,7 +7836,7 @@ define @test_vluxseg8_nxv4i16_nxv4i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7873,7 +7873,7 @@ define @test_vluxseg2_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7903,7 +7903,7 @@ define @test_vluxseg2_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7933,7 +7933,7 @@ define @test_vluxseg2_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7963,7 +7963,7 @@ define @test_vluxseg2_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -7993,7 +7993,7 @@ define @test_vluxseg3_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8025,7 +8025,7 @@ define @test_vluxseg3_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8057,7 +8057,7 @@ define @test_vluxseg3_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8089,7 +8089,7 @@ define @test_vluxseg3_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8121,7 +8121,7 @@ define @test_vluxseg4_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8154,7 +8154,7 @@ define 
@test_vluxseg4_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8187,7 +8187,7 @@ define @test_vluxseg4_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8220,7 +8220,7 @@ define @test_vluxseg4_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8253,7 +8253,7 @@ define @test_vluxseg5_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8287,7 +8287,7 @@ define @test_vluxseg5_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8321,7 +8321,7 @@ define @test_vluxseg5_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8355,7 +8355,7 @@ define @test_vluxseg5_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8389,7 +8389,7 @@ define @test_vluxseg6_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8424,7 +8424,7 @@ define @test_vluxseg6_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8459,7 +8459,7 @@ define @test_vluxseg6_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8494,7 +8494,7 @@ define @test_vluxseg6_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { ; 
CHECK-LABEL: test_vluxseg6_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8529,7 +8529,7 @@ define @test_vluxseg7_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8565,7 +8565,7 @@ define @test_vluxseg7_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8601,7 +8601,7 @@ define @test_vluxseg7_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8637,7 +8637,7 @@ define @test_vluxseg7_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8673,7 +8673,7 @@ define @test_vluxseg8_nxv1i8_nxv1i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8710,7 +8710,7 @@ define @test_vluxseg8_nxv1i8_nxv1i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8747,7 +8747,7 @@ define @test_vluxseg8_nxv1i8_nxv1i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8784,7 +8784,7 @@ define @test_vluxseg8_nxv1i8_nxv1i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8821,7 +8821,7 @@ define @test_vluxseg2_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8851,7 +8851,7 @@ define @test_vluxseg2_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8881,7 +8881,7 @@ define @test_vluxseg2_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8911,7 +8911,7 @@ define @test_vluxseg2_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -8941,7 +8941,7 @@ define @test_vluxseg3_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -8973,7 +8973,7 @@ define @test_vluxseg3_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9005,7 +9005,7 @@ define @test_vluxseg3_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9037,7 +9037,7 @@ define @test_vluxseg3_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -9068,7 +9068,7 @@ define @test_vluxseg4_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9101,7 +9101,7 @@ define @test_vluxseg4_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9134,7 +9134,7 @@ define @test_vluxseg4_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9167,7 +9167,7 @@ define @test_vluxseg4_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli 
zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -9200,7 +9200,7 @@ define @test_vluxseg5_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9234,7 +9234,7 @@ define @test_vluxseg5_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9268,7 +9268,7 @@ define @test_vluxseg5_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9302,7 +9302,7 @@ define @test_vluxseg5_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -9336,7 +9336,7 @@ define @test_vluxseg6_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9371,7 +9371,7 @@ define @test_vluxseg6_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9406,7 +9406,7 @@ define @test_vluxseg6_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9441,7 +9441,7 @@ define @test_vluxseg6_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -9476,7 +9476,7 @@ define @test_vluxseg7_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9512,7 +9512,7 @@ define @test_vluxseg7_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; 
CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9548,7 +9548,7 @@ define @test_vluxseg7_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9584,7 +9584,7 @@ define @test_vluxseg7_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -9620,7 +9620,7 @@ define @test_vluxseg8_nxv2i8_nxv2i32(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9657,7 +9657,7 @@ define @test_vluxseg8_nxv2i8_nxv2i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9694,7 +9694,7 @@ define @test_vluxseg8_nxv2i8_nxv2i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9731,7 +9731,7 @@ define @test_vluxseg8_nxv2i8_nxv2i64(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -9768,7 +9768,7 @@ define @test_vluxseg2_nxv8i32_nxv8i16(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -9798,7 +9798,7 @@ define @test_vluxseg2_nxv8i32_nxv8i8(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -9828,7 +9828,7 @@ define @test_vluxseg2_nxv8i32_nxv8i64(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vluxseg2ei64.v v16, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret @@ -9858,7 +9858,7 @@ define @test_vluxseg2_nxv8i32_nxv8i32(i32* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ 
-9888,7 +9888,7 @@ define @test_vluxseg2_nxv32i8_nxv32i16(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v16, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret @@ -9918,7 +9918,7 @@ define @test_vluxseg2_nxv32i8_nxv32i8(i8* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -9948,7 +9948,7 @@ define @test_vluxseg2_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -9978,7 +9978,7 @@ define @test_vluxseg2_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10008,7 +10008,7 @@ define @test_vluxseg2_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10038,7 +10038,7 @@ define @test_vluxseg2_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -10068,7 +10068,7 @@ define @test_vluxseg3_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10100,7 +10100,7 @@ define @test_vluxseg3_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10132,7 +10132,7 @@ define @test_vluxseg3_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10164,7 +10164,7 @@ define @test_vluxseg3_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -10195,7 
+10195,7 @@ define @test_vluxseg4_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10228,7 +10228,7 @@ define @test_vluxseg4_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10261,7 +10261,7 @@ define @test_vluxseg4_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10294,7 +10294,7 @@ define @test_vluxseg4_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -10327,7 +10327,7 @@ define @test_vluxseg5_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10361,7 +10361,7 @@ define @test_vluxseg5_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10395,7 +10395,7 @@ define @test_vluxseg5_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10429,7 +10429,7 @@ define @test_vluxseg5_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -10463,7 +10463,7 @@ define @test_vluxseg6_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10498,7 +10498,7 @@ define @test_vluxseg6_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10533,7 
+10533,7 @@ define @test_vluxseg6_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10568,7 +10568,7 @@ define @test_vluxseg6_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -10603,7 +10603,7 @@ define @test_vluxseg7_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10639,7 +10639,7 @@ define @test_vluxseg7_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10675,7 +10675,7 @@ define @test_vluxseg7_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10711,7 +10711,7 @@ define @test_vluxseg7_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -10747,7 +10747,7 @@ define @test_vluxseg8_nxv2i16_nxv2i32(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10784,7 +10784,7 @@ define @test_vluxseg8_nxv2i16_nxv2i8(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10821,7 +10821,7 @@ define @test_vluxseg8_nxv2i16_nxv2i16(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -10858,7 +10858,7 @@ define @test_vluxseg8_nxv2i16_nxv2i64(i16* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ 
-10895,7 +10895,7 @@ define @test_vluxseg2_nxv2i64_nxv2i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -10925,7 +10925,7 @@ define @test_vluxseg2_nxv2i64_nxv2i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -10955,7 +10955,7 @@ define @test_vluxseg2_nxv2i64_nxv2i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -10985,7 +10985,7 @@ define @test_vluxseg2_nxv2i64_nxv2i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -11015,7 +11015,7 @@ define @test_vluxseg3_nxv2i64_nxv2i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -11047,7 +11047,7 @@ define @test_vluxseg3_nxv2i64_nxv2i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -11079,7 +11079,7 @@ define @test_vluxseg3_nxv2i64_nxv2i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -11111,7 +11111,7 @@ define @test_vluxseg3_nxv2i64_nxv2i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -11143,7 +11143,7 @@ define @test_vluxseg4_nxv2i64_nxv2i32(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -11176,7 +11176,7 @@ define @test_vluxseg4_nxv2i64_nxv2i8(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -11209,7 
+11209,7 @@ define @test_vluxseg4_nxv2i64_nxv2i16(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -11242,7 +11242,7 @@ define @test_vluxseg4_nxv2i64_nxv2i64(i64* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -11275,7 +11275,7 @@ define @test_vluxseg2_nxv16f16_nxv16i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -11305,7 +11305,7 @@ define @test_vluxseg2_nxv16f16_nxv16i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -11335,7 +11335,7 @@ define @test_vluxseg2_nxv16f16_nxv16i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v16, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret @@ -11365,7 +11365,7 @@ define @test_vluxseg2_nxv4f64_nxv4i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -11395,7 +11395,7 @@ define @test_vluxseg2_nxv4f64_nxv4i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -11425,7 +11425,7 @@ define @test_vluxseg2_nxv4f64_nxv4i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -11455,7 +11455,7 @@ define @test_vluxseg2_nxv4f64_nxv4i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -11485,7 +11485,7 @@ define @test_vluxseg2_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; 
CHECK-NEXT: ret @@ -11515,7 +11515,7 @@ define @test_vluxseg2_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11545,7 +11545,7 @@ define @test_vluxseg2_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11575,7 +11575,7 @@ define @test_vluxseg2_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11605,7 +11605,7 @@ define @test_vluxseg3_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11637,7 +11637,7 @@ define @test_vluxseg3_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11669,7 +11669,7 @@ define @test_vluxseg3_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11701,7 +11701,7 @@ define @test_vluxseg3_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11733,7 +11733,7 @@ define @test_vluxseg4_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11766,7 +11766,7 @@ define @test_vluxseg4_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11799,7 +11799,7 @@ define @test_vluxseg4_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v 
v8, v10 ; CHECK-NEXT: ret @@ -11832,7 +11832,7 @@ define @test_vluxseg4_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11865,7 +11865,7 @@ define @test_vluxseg5_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11899,7 +11899,7 @@ define @test_vluxseg5_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11933,7 +11933,7 @@ define @test_vluxseg5_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -11967,7 +11967,7 @@ define @test_vluxseg5_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12001,7 +12001,7 @@ define @test_vluxseg6_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12036,7 +12036,7 @@ define @test_vluxseg6_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12071,7 +12071,7 @@ define @test_vluxseg6_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12106,7 +12106,7 @@ define @test_vluxseg6_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12141,7 +12141,7 @@ define @test_vluxseg7_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8 ; CHECK-NEXT: 
vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12177,7 +12177,7 @@ define @test_vluxseg7_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12213,7 +12213,7 @@ define @test_vluxseg7_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12249,7 +12249,7 @@ define @test_vluxseg7_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12285,7 +12285,7 @@ define @test_vluxseg8_nxv1f64_nxv1i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12322,7 +12322,7 @@ define @test_vluxseg8_nxv1f64_nxv1i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12359,7 +12359,7 @@ define @test_vluxseg8_nxv1f64_nxv1i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12396,7 +12396,7 @@ define @test_vluxseg8_nxv1f64_nxv1i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12433,7 +12433,7 @@ define @test_vluxseg2_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12463,7 +12463,7 @@ define @test_vluxseg2_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12493,7 +12493,7 @@ define @test_vluxseg2_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; 
CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12523,7 +12523,7 @@ define @test_vluxseg2_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -12553,7 +12553,7 @@ define @test_vluxseg3_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12585,7 +12585,7 @@ define @test_vluxseg3_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12617,7 +12617,7 @@ define @test_vluxseg3_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12649,7 +12649,7 @@ define @test_vluxseg3_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -12680,7 +12680,7 @@ define @test_vluxseg4_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12713,7 +12713,7 @@ define @test_vluxseg4_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12746,7 +12746,7 @@ define @test_vluxseg4_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12779,7 +12779,7 @@ define @test_vluxseg4_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -12812,7 +12812,7 @@ define @test_vluxseg5_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 
; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12846,7 +12846,7 @@ define @test_vluxseg5_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12880,7 +12880,7 @@ define @test_vluxseg5_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12914,7 +12914,7 @@ define @test_vluxseg5_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -12948,7 +12948,7 @@ define @test_vluxseg6_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -12983,7 +12983,7 @@ define @test_vluxseg6_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13018,7 +13018,7 @@ define @test_vluxseg6_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13053,7 +13053,7 @@ define @test_vluxseg6_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -13088,7 +13088,7 @@ define @test_vluxseg7_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13124,7 +13124,7 @@ define @test_vluxseg7_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13160,7 +13160,7 @@ define @test_vluxseg7_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; 
CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13196,7 +13196,7 @@ define @test_vluxseg7_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -13232,7 +13232,7 @@ define @test_vluxseg8_nxv2f32_nxv2i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13269,7 +13269,7 @@ define @test_vluxseg8_nxv2f32_nxv2i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13306,7 +13306,7 @@ define @test_vluxseg8_nxv2f32_nxv2i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13343,7 +13343,7 @@ define @test_vluxseg8_nxv2f32_nxv2i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -13380,7 +13380,7 @@ define @test_vluxseg2_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13410,7 +13410,7 @@ define @test_vluxseg2_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13440,7 +13440,7 @@ define @test_vluxseg2_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13470,7 +13470,7 @@ define @test_vluxseg2_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13500,7 +13500,7 @@ define @test_vluxseg3_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg3ei64.v v9, 
(a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13532,7 +13532,7 @@ define @test_vluxseg3_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13564,7 +13564,7 @@ define @test_vluxseg3_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13596,7 +13596,7 @@ define @test_vluxseg3_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13628,7 +13628,7 @@ define @test_vluxseg4_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13661,7 +13661,7 @@ define @test_vluxseg4_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13694,7 +13694,7 @@ define @test_vluxseg4_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13727,7 +13727,7 @@ define @test_vluxseg4_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13760,7 +13760,7 @@ define @test_vluxseg5_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13794,7 +13794,7 @@ define @test_vluxseg5_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13828,7 +13828,7 @@ define @test_vluxseg5_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: 
vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13862,7 +13862,7 @@ define @test_vluxseg5_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13896,7 +13896,7 @@ define @test_vluxseg6_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13931,7 +13931,7 @@ define @test_vluxseg6_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -13966,7 +13966,7 @@ define @test_vluxseg6_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14001,7 +14001,7 @@ define @test_vluxseg6_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14036,7 +14036,7 @@ define @test_vluxseg7_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14072,7 +14072,7 @@ define @test_vluxseg7_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14108,7 +14108,7 @@ define @test_vluxseg7_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14144,7 +14144,7 @@ define @test_vluxseg7_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14180,7 +14180,7 @@ define @test_vluxseg8_nxv1f16_nxv1i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; 
CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14217,7 +14217,7 @@ define @test_vluxseg8_nxv1f16_nxv1i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14254,7 +14254,7 @@ define @test_vluxseg8_nxv1f16_nxv1i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14291,7 +14291,7 @@ define @test_vluxseg8_nxv1f16_nxv1i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14328,7 +14328,7 @@ define @test_vluxseg2_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg2ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14358,7 +14358,7 @@ define @test_vluxseg2_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14388,7 +14388,7 @@ define @test_vluxseg2_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14418,7 +14418,7 @@ define @test_vluxseg2_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14448,7 +14448,7 @@ define @test_vluxseg3_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg3ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14480,7 +14480,7 @@ define @test_vluxseg3_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14512,7 +14512,7 @@ define @test_vluxseg3_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, 
e32, mf2, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14544,7 +14544,7 @@ define @test_vluxseg3_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14576,7 +14576,7 @@ define @test_vluxseg4_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg4ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14609,7 +14609,7 @@ define @test_vluxseg4_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14642,7 +14642,7 @@ define @test_vluxseg4_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14675,7 +14675,7 @@ define @test_vluxseg4_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14708,7 +14708,7 @@ define @test_vluxseg5_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg5ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14742,7 +14742,7 @@ define @test_vluxseg5_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14776,7 +14776,7 @@ define @test_vluxseg5_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14810,7 +14810,7 @@ define @test_vluxseg5_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14844,7 +14844,7 @@ define @test_vluxseg6_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: 
vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg6ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14879,7 +14879,7 @@ define @test_vluxseg6_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14914,7 +14914,7 @@ define @test_vluxseg6_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14949,7 +14949,7 @@ define @test_vluxseg6_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -14984,7 +14984,7 @@ define @test_vluxseg7_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg7ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -15020,7 +15020,7 @@ define @test_vluxseg7_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -15056,7 +15056,7 @@ define @test_vluxseg7_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -15092,7 +15092,7 @@ define @test_vluxseg7_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -15128,7 +15128,7 @@ define @test_vluxseg8_nxv1f32_nxv1i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg8ei64.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -15165,7 +15165,7 @@ define @test_vluxseg8_nxv1f32_nxv1i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -15202,7 +15202,7 @@ define @test_vluxseg8_nxv1f32_nxv1i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, 
ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -15239,7 +15239,7 @@ define @test_vluxseg8_nxv1f32_nxv1i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -15276,7 +15276,7 @@ define @test_vluxseg2_nxv8f16_nxv8i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -15306,7 +15306,7 @@ define @test_vluxseg2_nxv8f16_nxv8i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -15336,7 +15336,7 @@ define @test_vluxseg2_nxv8f16_nxv8i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg2ei64.v v16, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret @@ -15366,7 +15366,7 @@ define @test_vluxseg2_nxv8f16_nxv8i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -15396,7 +15396,7 @@ define @test_vluxseg3_nxv8f16_nxv8i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -15428,7 +15428,7 @@ define @test_vluxseg3_nxv8f16_nxv8i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -15460,7 +15460,7 @@ define @test_vluxseg3_nxv8f16_nxv8i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg3ei64.v v16, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret @@ -15491,7 +15491,7 @@ define @test_vluxseg3_nxv8f16_nxv8i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -15522,7 +15522,7 @@ define @test_vluxseg4_nxv8f16_nxv8i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, 
ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -15555,7 +15555,7 @@ define @test_vluxseg4_nxv8f16_nxv8i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -15588,7 +15588,7 @@ define @test_vluxseg4_nxv8f16_nxv8i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg4ei64.v v16, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v18 ; CHECK-NEXT: ret @@ -15620,7 +15620,7 @@ define @test_vluxseg4_nxv8f16_nxv8i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -15653,7 +15653,7 @@ define @test_vluxseg2_nxv8f32_nxv8i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -15683,7 +15683,7 @@ define @test_vluxseg2_nxv8f32_nxv8i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -15713,7 +15713,7 @@ define @test_vluxseg2_nxv8f32_nxv8i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vluxseg2ei64.v v16, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v20 ; CHECK-NEXT: ret @@ -15743,7 +15743,7 @@ define @test_vluxseg2_nxv8f32_nxv8i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v12, (a0), v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -15773,7 +15773,7 @@ define @test_vluxseg2_nxv2f64_nxv2i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -15803,7 +15803,7 @@ define @test_vluxseg2_nxv2f64_nxv2i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -15833,7 +15833,7 @@ define @test_vluxseg2_nxv2f64_nxv2i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, 
e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -15863,7 +15863,7 @@ define @test_vluxseg2_nxv2f64_nxv2i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -15893,7 +15893,7 @@ define @test_vluxseg3_nxv2f64_nxv2i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -15925,7 +15925,7 @@ define @test_vluxseg3_nxv2f64_nxv2i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -15957,7 +15957,7 @@ define @test_vluxseg3_nxv2f64_nxv2i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -15989,7 +15989,7 @@ define @test_vluxseg3_nxv2f64_nxv2i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -16021,7 +16021,7 @@ define @test_vluxseg4_nxv2f64_nxv2i32(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -16054,7 +16054,7 @@ define @test_vluxseg4_nxv2f64_nxv2i8(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -16087,7 +16087,7 @@ define @test_vluxseg4_nxv2f64_nxv2i16(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -16120,7 +16120,7 @@ define @test_vluxseg4_nxv2f64_nxv2i64(double* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -16153,7 +16153,7 @@ define @test_vluxseg2_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -16183,7 +16183,7 @@ define @test_vluxseg2_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -16213,7 +16213,7 @@ define @test_vluxseg2_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -16243,7 +16243,7 @@ define @test_vluxseg2_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -16273,7 +16273,7 @@ define @test_vluxseg3_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -16304,7 +16304,7 @@ define @test_vluxseg3_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -16336,7 +16336,7 @@ define @test_vluxseg3_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg3ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -16367,7 +16367,7 @@ define @test_vluxseg3_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -16399,7 +16399,7 @@ define @test_vluxseg4_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -16432,7 +16432,7 @@ define @test_vluxseg4_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -16465,7 +16465,7 @@ define @test_vluxseg4_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -16497,7 +16497,7 @@ define @test_vluxseg4_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -16530,7 +16530,7 @@ define @test_vluxseg5_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -16564,7 +16564,7 @@ define @test_vluxseg5_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -16598,7 +16598,7 @@ define @test_vluxseg5_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -16631,7 +16631,7 @@ define @test_vluxseg5_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -16665,7 +16665,7 @@ define @test_vluxseg6_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -16700,7 +16700,7 @@ define @test_vluxseg6_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -16735,7 +16735,7 @@ define @test_vluxseg6_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -16770,7 +16770,7 @@ define @test_vluxseg6_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -16805,7 +16805,7 @@ define @test_vluxseg7_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -16841,7 +16841,7 @@ define @test_vluxseg7_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -16877,7 +16877,7 @@ define @test_vluxseg7_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -16913,7 +16913,7 @@ define @test_vluxseg7_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -16949,7 +16949,7 @@ define @test_vluxseg8_nxv4f16_nxv4i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -16986,7 +16986,7 @@ define @test_vluxseg8_nxv4f16_nxv4i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17023,7 +17023,7 @@ define @test_vluxseg8_nxv4f16_nxv4i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret @@ -17060,7 +17060,7 @@ define @test_vluxseg8_nxv4f16_nxv4i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17097,7 +17097,7 @@ define @test_vluxseg2_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17127,7 +17127,7 @@ define @test_vluxseg2_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17157,7 +17157,7 @@ define @test_vluxseg2_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, 
e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17187,7 +17187,7 @@ define @test_vluxseg2_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg2ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -17217,7 +17217,7 @@ define @test_vluxseg3_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17249,7 +17249,7 @@ define @test_vluxseg3_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17281,7 +17281,7 @@ define @test_vluxseg3_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17313,7 +17313,7 @@ define @test_vluxseg3_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -17344,7 +17344,7 @@ define @test_vluxseg4_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17377,7 +17377,7 @@ define @test_vluxseg4_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17410,7 +17410,7 @@ define @test_vluxseg4_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17443,7 +17443,7 @@ define @test_vluxseg4_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -17476,7 +17476,7 @@ define @test_vluxseg5_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg5ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17510,7 +17510,7 @@ define @test_vluxseg5_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17544,7 +17544,7 @@ define @test_vluxseg5_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg5ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17578,7 +17578,7 @@ define @test_vluxseg5_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg5_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -17612,7 +17612,7 @@ define @test_vluxseg6_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg6ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17647,7 +17647,7 @@ define @test_vluxseg6_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17682,7 +17682,7 @@ define @test_vluxseg6_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg6ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17717,7 +17717,7 @@ define @test_vluxseg6_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg6_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -17752,7 +17752,7 @@ define @test_vluxseg7_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg7ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17788,7 +17788,7 @@ define @test_vluxseg7_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17824,7 +17824,7 @@ define @test_vluxseg7_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg7ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17860,7 +17860,7 @@ define @test_vluxseg7_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg7_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -17896,7 +17896,7 @@ define @test_vluxseg8_nxv2f16_nxv2i32(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg8ei32.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17933,7 +17933,7 @@ define @test_vluxseg8_nxv2f16_nxv2i8(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -17970,7 +17970,7 @@ define @test_vluxseg8_nxv2f16_nxv2i16(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg8ei16.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -18007,7 +18007,7 @@ define @test_vluxseg8_nxv2f16_nxv2i64(half* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg8_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret @@ -18044,7 +18044,7 @@ define @test_vluxseg2_nxv4f32_nxv4i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg2ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -18074,7 +18074,7 @@ define @test_vluxseg2_nxv4f32_nxv4i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg2ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -18104,7 +18104,7 @@ define @test_vluxseg2_nxv4f32_nxv4i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg2ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -18134,7 +18134,7 @@ define @test_vluxseg2_nxv4f32_nxv4i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg2_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg2ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -18164,7 +18164,7 @@ define @test_vluxseg3_nxv4f32_nxv4i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv4i32: ; CHECK: # %bb.0: 
# %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -18196,7 +18196,7 @@ define @test_vluxseg3_nxv4f32_nxv4i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -18228,7 +18228,7 @@ define @test_vluxseg3_nxv4f32_nxv4i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg3ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -18259,7 +18259,7 @@ define @test_vluxseg3_nxv4f32_nxv4i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg3_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -18291,7 +18291,7 @@ define @test_vluxseg4_nxv4f32_nxv4i32(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -18324,7 +18324,7 @@ define @test_vluxseg4_nxv4f32_nxv4i8(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -18357,7 +18357,7 @@ define @test_vluxseg4_nxv4f32_nxv4i64(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret @@ -18390,7 +18390,7 @@ define @test_vluxseg4_nxv4f32_nxv4i16(float* %base, %index, i64 %vl) { ; CHECK-LABEL: test_vluxseg4_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll @@ -12,7 +12,7 @@ define @intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -60,7 +60,7 @@ define @intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vmacc.vv v8, v9, v10 ; CHECK-NEXT: 
ret entry: @@ -108,7 +108,7 @@ define @intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -156,7 +156,7 @@ define @intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -204,7 +204,7 @@ define @intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vmacc.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -252,7 +252,7 @@ define @intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vmacc.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -300,7 +300,7 @@ define @intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -348,7 +348,7 @@ define @intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -396,7 +396,7 @@ define @intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -444,7 +444,7 @@ define @intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vmacc.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -492,7 +492,7 @@ define @intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vmacc.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -540,7 +540,7 @@ define @intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -588,7 +588,7 @@ define @intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32( %0, 
%1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -636,7 +636,7 @@ define @intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vmacc.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -684,7 +684,7 @@ define @intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vmacc.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -732,7 +732,7 @@ define @intrinsic_vmacc_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -780,7 +780,7 @@ define @intrinsic_vmacc_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vmacc.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -828,7 +828,7 @@ define @intrinsic_vmacc_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vmacc.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -876,7 +876,7 @@ define @intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vmacc.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -924,7 +924,7 @@ define @intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma ; CHECK-NEXT: vmacc.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -972,7 +972,7 @@ define @intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma ; CHECK-NEXT: vmacc.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1020,7 +1020,7 @@ define @intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma ; CHECK-NEXT: vmacc.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1068,7 +1068,7 @@ define @intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a1, e8, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma
; CHECK-NEXT: vmacc.vx v8, a0, v10
; CHECK-NEXT: ret
entry:
@@ -1116,7 +1116,7 @@
define @intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma
; CHECK-NEXT: vmacc.vx v8, a0, v12
; CHECK-NEXT: ret
entry:
@@ -1164,7 +1164,7 @@
define @intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma
; CHECK-NEXT: vmacc.vx v8, a0, v9
; CHECK-NEXT: ret
entry:
@@ -1212,7 +1212,7 @@
define @intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma
; CHECK-NEXT: vmacc.vx v8, a0, v9
; CHECK-NEXT: ret
entry:
@@ -1260,7 +1260,7 @@
define @intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
; CHECK-NEXT: vmacc.vx v8, a0, v9
; CHECK-NEXT: ret
entry:
@@ -1308,7 +1308,7 @@
define @intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma
; CHECK-NEXT: vmacc.vx v8, a0, v10
; CHECK-NEXT: ret
entry:
@@ -1356,7 +1356,7 @@
define @intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma
; CHECK-NEXT: vmacc.vx v8, a0, v12
; CHECK-NEXT: ret
entry:
@@ -1404,7 +1404,7 @@
define @intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma
; CHECK-NEXT: vmacc.vx v8, a0, v9
; CHECK-NEXT: ret
entry:
@@ -1452,7 +1452,7 @@
define @intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma
; CHECK-NEXT: vmacc.vx v8, a0, v9
; CHECK-NEXT: ret
entry:
@@ -1500,7 +1500,7 @@
define @intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma
; CHECK-NEXT: vmacc.vx v8, a0, v10
; CHECK-NEXT: ret
entry:
@@ -1548,7 +1548,7 @@
define @intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma
; CHECK-NEXT: vmacc.vx v8, a0, v12
; CHECK-NEXT: ret
entry:
@@ -1600,9 +1600,9 @@
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, ma
; CHECK-NEXT: vmacc.vv v8, v10, v9
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
@@ -1630,7 +1630,7 @@
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vlse64.v v10, (a0), zero
; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu
; CHECK-NEXT: vmacc.vv v8, v10, v9, v0.t
@@ -1662,9 +1662,9 @@
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, ma
; CHECK-NEXT: vmacc.vv v8, v12, v10
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
@@ -1692,7 +1692,7 @@
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT: vlse64.v v12, (a0), zero
; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu
; CHECK-NEXT: vmacc.vv v8, v12, v10, v0.t
@@ -1724,9 +1724,9 @@
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, ma
; CHECK-NEXT: vmacc.vv v8, v16, v12
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
@@ -1754,7 +1754,7 @@
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-NEXT: vlse64.v v16, (a0), zero
; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu
; CHECK-NEXT: vmacc.vv v8, v16, v12, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll
@@ -11,7 +11,7 @@
define @intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vmacc.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -58,7 +58,7 @@
define @intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
; CHECK-NEXT: vmacc.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -105,7 +105,7 @@
define @intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
; CHECK-NEXT: vmacc.vv v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -152,7
+152,7 @@ define @intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -199,7 +199,7 @@ define @intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vmacc.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -246,7 +246,7 @@ define @intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vmacc.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -293,7 +293,7 @@ define @intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vmacc.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vmacc.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -622,7 +622,7 @@ define @intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i64 
%3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vmacc.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -669,7 +669,7 @@ define @intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vmacc.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -716,7 +716,7 @@ define @intrinsic_vmacc_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -763,7 +763,7 @@ define @intrinsic_vmacc_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vmacc.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -810,7 +810,7 @@ define @intrinsic_vmacc_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vmacc.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -857,7 +857,7 @@ define @intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vmacc.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -904,7 +904,7 @@ define @intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma ; CHECK-NEXT: vmacc.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -951,7 +951,7 @@ define @intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma ; CHECK-NEXT: vmacc.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -998,7 +998,7 @@ define @intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma ; CHECK-NEXT: vmacc.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1045,7 +1045,7 @@ define @intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma ; CHECK-NEXT: vmacc.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -1092,7 +1092,7 @@ define @intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, 
tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma ; CHECK-NEXT: vmacc.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -1139,7 +1139,7 @@ define @intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; CHECK-NEXT: vmacc.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1186,7 +1186,7 @@ define @intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; CHECK-NEXT: vmacc.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1233,7 +1233,7 @@ define @intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; CHECK-NEXT: vmacc.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1280,7 +1280,7 @@ define @intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma ; CHECK-NEXT: vmacc.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -1327,7 +1327,7 @@ define @intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma ; CHECK-NEXT: vmacc.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -1374,7 +1374,7 @@ define @intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; CHECK-NEXT: vmacc.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1421,7 +1421,7 @@ define @intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; CHECK-NEXT: vmacc.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1468,7 +1468,7 @@ define @intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; CHECK-NEXT: vmacc.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -1515,7 +1515,7 @@ define @intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma ; CHECK-NEXT: vmacc.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -1562,7 +1562,7 @@ define @intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, ma ; CHECK-NEXT: vmacc.vx v8, a0, v9 ; 
CHECK-NEXT: ret entry: @@ -1609,7 +1609,7 @@ define @intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, ma ; CHECK-NEXT: vmacc.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -1656,7 +1656,7 @@ define @intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, ma ; CHECK-NEXT: vmacc.vx v8, a0, v12 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll @@ -9,7 +9,7 @@ define @intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -29,7 +29,7 @@ define @intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -49,7 +49,7 @@ define @intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -69,7 +69,7 @@ define @intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -89,7 +89,7 @@ define @intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -109,7 +109,7 @@ define @intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -129,7 +129,7 @@ define @intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v16 ; CHECK-NEXT: ret entry: @@ -149,7 +149,7 @@ define @intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; 
CHECK-NEXT: vmadc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -169,7 +169,7 @@ define @intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -189,7 +189,7 @@ define @intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -209,7 +209,7 @@ define @intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -229,7 +229,7 @@ define @intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -249,7 +249,7 @@ define @intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v16 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ define @intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -289,7 +289,7 @@ define @intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -309,7 +309,7 @@ define @intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -329,7 +329,7 @@ define @intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -349,7 +349,7 @@ define @intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v16 ; CHECK-NEXT: ret entry: @@ -369,7 +369,7 @@ define 
@intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -389,7 +389,7 @@ define @intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -409,7 +409,7 @@ define @intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -429,7 +429,7 @@ define @intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v16 ; CHECK-NEXT: ret entry: @@ -449,7 +449,7 @@ define @intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -469,7 +469,7 @@ define @intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -489,7 +489,7 @@ define @intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -509,7 +509,7 @@ define @intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -529,7 +529,7 @@ define @intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -549,7 +549,7 @@ define @intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -569,7 +569,7 @@ define @intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; 
CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -589,7 +589,7 @@ define @intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -609,7 +609,7 @@ define @intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -629,7 +629,7 @@ define @intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -649,7 +649,7 @@ define @intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -669,7 +669,7 @@ define @intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -689,7 +689,7 @@ define @intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -709,7 +709,7 @@ define @intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -729,7 +729,7 @@ define @intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -749,7 +749,7 @@ define @intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -769,7 +769,7 @@ define @intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -789,7 +789,7 @@ define @intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32( %0, i32 
%1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -813,7 +813,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vmadc.vv v0, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -839,7 +839,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmadc.vv v0, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -865,7 +865,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmadc.vv v0, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -891,7 +891,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmadc.vv v0, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 @@ -908,7 +908,7 @@ define @intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -923,7 +923,7 @@ define @intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, -9 ; CHECK-NEXT: ret entry: @@ -938,7 +938,7 @@ define @intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, -9 ; CHECK-NEXT: ret entry: @@ -968,7 +968,7 @@ define @intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -983,7 +983,7 @@ define @intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, -9 ; CHECK-NEXT: ret entry: @@ -998,7 +998,7 @@ define @intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: 
intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1013,7 +1013,7 @@ define @intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, -9 ; CHECK-NEXT: ret entry: @@ -1028,7 +1028,7 @@ define @intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1043,7 +1043,7 @@ define @intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, -9 ; CHECK-NEXT: ret entry: @@ -1058,7 +1058,7 @@ define @intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1073,7 +1073,7 @@ define @intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, -9 ; CHECK-NEXT: ret entry: @@ -1088,7 +1088,7 @@ define @intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1103,7 +1103,7 @@ define @intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, -9 ; CHECK-NEXT: ret entry: @@ -1118,7 +1118,7 @@ define @intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1133,7 +1133,7 @@ define @intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, -9 ; CHECK-NEXT: ret entry: @@ -1148,7 +1148,7 @@ define @intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1163,7 +1163,7 @@ 
define @intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, -9 ; CHECK-NEXT: ret entry: @@ -1178,7 +1178,7 @@ define @intrinsic_vmadc_vi_nxv1i1_nxv1i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1193,7 +1193,7 @@ define @intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, -9 ; CHECK-NEXT: ret entry: @@ -1208,7 +1208,7 @@ define @intrinsic_vmadc_vi_nxv4i1_nxv4i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1223,7 +1223,7 @@ define @intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, -9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll @@ -9,7 +9,7 @@ define @intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -29,7 +29,7 @@ define @intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -49,7 +49,7 @@ define @intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -69,7 +69,7 @@ define @intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -89,7 +89,7 @@ define @intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -109,7 +109,7 @@ define 
@intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -129,7 +129,7 @@ define @intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v16 ; CHECK-NEXT: ret entry: @@ -149,7 +149,7 @@ define @intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -169,7 +169,7 @@ define @intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -189,7 +189,7 @@ define @intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -209,7 +209,7 @@ define @intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -229,7 +229,7 @@ define @intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -249,7 +249,7 @@ define @intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v16 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ define @intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -289,7 +289,7 @@ define @intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -309,7 +309,7 @@ define @intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -329,7 +329,7 @@ define @intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -349,7 +349,7 @@ define @intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v16 ; CHECK-NEXT: ret entry: @@ -369,7 +369,7 @@ define @intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -389,7 +389,7 @@ define @intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -409,7 +409,7 @@ define @intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -429,7 +429,7 @@ define @intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmadc.vv v0, v8, v16 ; CHECK-NEXT: ret entry: @@ -449,7 +449,7 @@ define @intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -469,7 +469,7 @@ define @intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -489,7 +489,7 @@ define @intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -509,7 +509,7 @@ define @intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret 
entry: @@ -529,7 +529,7 @@ define @intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -549,7 +549,7 @@ define @intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -569,7 +569,7 @@ define @intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -589,7 +589,7 @@ define @intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -609,7 +609,7 @@ define @intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -629,7 +629,7 @@ define @intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -649,7 +649,7 @@ define @intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -669,7 +669,7 @@ define @intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -689,7 +689,7 @@ define @intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -709,7 +709,7 @@ define @intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -729,7 +729,7 @@ define @intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -749,7 +749,7 @@ define @intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -769,7 +769,7 @@ define @intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -789,7 +789,7 @@ define @intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -809,7 +809,7 @@ define @intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -829,7 +829,7 @@ define @intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -849,7 +849,7 @@ define @intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -869,7 +869,7 @@ define @intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vmadc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -884,7 +884,7 @@ define @intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -899,7 +899,7 @@ define @intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, -9 ; CHECK-NEXT: ret entry: @@ -914,7 +914,7 @@ define @intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -929,7 +929,7 @@ define @intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8( %0, i64 
%1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, -9 ; CHECK-NEXT: ret entry: @@ -944,7 +944,7 @@ define @intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -959,7 +959,7 @@ define @intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, -9 ; CHECK-NEXT: ret entry: @@ -974,7 +974,7 @@ define @intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -989,7 +989,7 @@ define @intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, -9 ; CHECK-NEXT: ret entry: @@ -1004,7 +1004,7 @@ define @intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1019,7 +1019,7 @@ define @intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, -9 ; CHECK-NEXT: ret entry: @@ -1034,7 +1034,7 @@ define @intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1049,7 +1049,7 @@ define @intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, -9 ; CHECK-NEXT: ret entry: @@ -1064,7 +1064,7 @@ define @intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1079,7 +1079,7 @@ define @intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, -9 ; CHECK-NEXT: ret entry: @@ -1094,7 
+1094,7 @@ define @intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1109,7 +1109,7 @@ define @intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, -9 ; CHECK-NEXT: ret entry: @@ -1124,7 +1124,7 @@ define @intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1139,7 +1139,7 @@ define @intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, -9 ; CHECK-NEXT: ret entry: @@ -1154,7 +1154,7 @@ define @intrinsic_vmadc_vi_nxv1i1_nxv1i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1169,7 +1169,7 @@ define @intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, -9 ; CHECK-NEXT: ret entry: @@ -1184,7 +1184,7 @@ define @intrinsic_vmadc_vi_nxv4i1_nxv4i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1199,7 +1199,7 @@ define @intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmadc.vi v0, v8, -9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i8_nxv1i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -33,7 +33,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i8_nxv2i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; 
CHECK-NEXT: vmadc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -56,7 +56,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i8_nxv4i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -79,7 +79,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -102,7 +102,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i8_nxv16i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmadc.vvm v12, v8, v10, v0 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: ret @@ -125,7 +125,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i8_nxv32i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmadc.vvm v16, v8, v12, v0 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ -148,7 +148,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv64i1_nxv64i8_nxv64i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv64i1_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmadc.vvm v24, v8, v16, v0 ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: ret @@ -171,7 +171,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i16_nxv1i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -194,7 +194,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i16_nxv2i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -217,7 +217,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -240,7 +240,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i16_nxv8i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmadc.vvm 
v12, v8, v10, v0 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: ret @@ -263,7 +263,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i16_nxv16i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmadc.vvm v16, v8, v12, v0 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ -286,7 +286,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i16_nxv32i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmadc.vvm v24, v8, v16, v0 ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: ret @@ -309,7 +309,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i32_nxv1i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -332,7 +332,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -355,7 +355,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i32_nxv4i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmadc.vvm v12, v8, v10, v0 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: ret @@ -378,7 +378,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i32_nxv8i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmadc.vvm v16, v8, v12, v0 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ -401,7 +401,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i32_nxv16i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmadc.vvm v24, v8, v16, v0 ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: ret @@ -424,7 +424,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -447,7 +447,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i64_nxv2i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: 
vmadc.vvm v12, v8, v10, v0 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: ret @@ -470,7 +470,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i64_nxv4i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmadc.vvm v16, v8, v12, v0 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ -493,7 +493,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i64_nxv8i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmadc.vvm v24, v8, v16, v0 ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: ret @@ -516,7 +516,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i8_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: ret @@ -539,7 +539,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: ret @@ -562,7 +562,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i8_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: ret @@ -585,7 +585,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i8_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0 ; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret @@ -608,7 +608,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i8_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmadc.vxm v10, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -631,7 +631,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i8_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmadc.vxm v12, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: ret @@ -654,7 +654,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv64i1_nxv64i8_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv64i1_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmadc.vxm v16, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ 
-677,7 +677,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i16_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: ret @@ -700,7 +700,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i16_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: ret @@ -723,7 +723,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i16_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0 ; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret @@ -746,7 +746,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i16_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmadc.vxm v10, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -769,7 +769,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i16_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmadc.vxm v12, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: ret @@ -792,7 +792,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i16_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vmadc.vxm v16, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ -815,7 +815,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i32_i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: ret @@ -838,7 +838,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i32_i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0 ; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret @@ -861,7 +861,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i32_i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmadc.vxm v10, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -884,7 +884,7 @@ define 
@intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i32_i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmadc.vxm v12, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: ret @@ -907,7 +907,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i32_i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vmadc.vxm v16, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ -934,7 +934,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmadc.vvm v9, v8, v10, v0 ; CHECK-NEXT: vmv.v.v v0, v9 @@ -963,7 +963,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmadc.vvm v10, v8, v12, v0 ; CHECK-NEXT: vmv1r.v v0, v10 @@ -992,7 +992,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmadc.vvm v12, v8, v16, v0 ; CHECK-NEXT: vmv1r.v v0, v12 @@ -1021,7 +1021,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vlse64.v v24, (a0), zero ; CHECK-NEXT: vmadc.vvm v16, v8, v24, v0 ; CHECK-NEXT: vmv1r.v v0, v16 @@ -1040,7 +1040,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i8_i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmadc.vim v9, v8, 9, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: ret @@ -1057,7 +1057,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmadc.vim v9, v8, 9, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: ret @@ -1074,7 +1074,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i8_i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmadc.vim v9, v8, 9, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: ret @@ -1091,7 +1091,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmadc.vim v9, v8, 9, v0 ; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret @@ -1108,7 +1108,7 @@ define 
@intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i8_i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmadc.vim v10, v8, 9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1125,7 +1125,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmadc.vim v12, v8, 9, v0 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: ret @@ -1142,7 +1142,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv64i1_nxv64i8_i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv64i1_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmadc.vim v16, v8, 9, v0 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ -1159,7 +1159,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmadc.vim v9, v8, 9, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: ret @@ -1176,7 +1176,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i16_i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmadc.vim v9, v8, 9, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: ret @@ -1193,7 +1193,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmadc.vim v9, v8, 9, v0 ; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret @@ -1210,7 +1210,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i16_i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmadc.vim v10, v8, 9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1227,7 +1227,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmadc.vim v12, v8, 9, v0 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: ret @@ -1244,7 +1244,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i16_i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmadc.vim v16, v8, 9, v0 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ -1261,7 +1261,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmadc.vim v9, v8, 9, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: ret @@ -1278,7 +1278,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i32_i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmadc.vim v9, v8, 9, v0 ; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret @@ -1295,7 +1295,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmadc.vim v10, v8, 9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1312,7 +1312,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i32_i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmadc.vim v12, v8, 9, v0 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: ret @@ -1329,7 +1329,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmadc.vim v16, v8, 9, v0 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ -1346,7 +1346,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i64_i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmadc.vim v9, v8, 9, v0 ; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret @@ -1363,7 +1363,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i64_i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmadc.vim v10, v8, 9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1380,7 +1380,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i64_i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmadc.vim v12, v8, 9, v0 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: ret @@ -1397,7 +1397,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i64_i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmadc.vim v16, v8, 9, v0 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll @@ 
-10,7 +10,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i8_nxv1i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -33,7 +33,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i8_nxv2i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -56,7 +56,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i8_nxv4i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -79,7 +79,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -102,7 +102,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i8_nxv16i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmadc.vvm v12, v8, v10, v0 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: ret @@ -125,7 +125,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i8_nxv32i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmadc.vvm v16, v8, v12, v0 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ -148,7 +148,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv64i1_nxv64i8_nxv64i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv64i1_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmadc.vvm v24, v8, v16, v0 ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: ret @@ -171,7 +171,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i16_nxv1i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -194,7 +194,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i16_nxv2i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -217,7 +217,7 @@ define 
@intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -240,7 +240,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i16_nxv8i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmadc.vvm v12, v8, v10, v0 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: ret @@ -263,7 +263,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i16_nxv16i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmadc.vvm v16, v8, v12, v0 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ -286,7 +286,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i16_nxv32i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmadc.vvm v24, v8, v16, v0 ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: ret @@ -309,7 +309,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i32_nxv1i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -332,7 +332,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -355,7 +355,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i32_nxv4i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmadc.vvm v12, v8, v10, v0 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: ret @@ -378,7 +378,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i32_nxv8i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmadc.vvm v16, v8, v12, v0 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ -401,7 +401,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i32_nxv16i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmadc.vvm v24, v8, v16, v0 ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: ret @@ -424,7 +424,7 
@@ define @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -447,7 +447,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i64_nxv2i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmadc.vvm v12, v8, v10, v0 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: ret @@ -470,7 +470,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i64_nxv4i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmadc.vvm v16, v8, v12, v0 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ -493,7 +493,7 @@ define @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i64_nxv8i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmadc.vvm v24, v8, v16, v0 ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: ret @@ -516,7 +516,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i8_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: ret @@ -539,7 +539,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: ret @@ -562,7 +562,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i8_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: ret @@ -585,7 +585,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i8_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0 ; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret @@ -608,7 +608,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i8_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmadc.vxm v10, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -631,7 +631,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i8_i8( %0, i8 
%1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmadc.vxm v12, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: ret @@ -654,7 +654,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv64i1_nxv64i8_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv64i1_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmadc.vxm v16, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ -677,7 +677,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i16_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: ret @@ -700,7 +700,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i16_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: ret @@ -723,7 +723,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i16_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0 ; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret @@ -746,7 +746,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i16_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmadc.vxm v10, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -769,7 +769,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i16_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmadc.vxm v12, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: ret @@ -792,7 +792,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i16_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vmadc.vxm v16, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ -815,7 +815,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i32_i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: ret @@ -838,7 +838,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i32_i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0 ; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret @@ -861,7 +861,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i32_i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmadc.vxm v10, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -884,7 +884,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i32_i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmadc.vxm v12, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: ret @@ -907,7 +907,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i32_i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vmadc.vxm v16, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ -930,7 +930,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i64_i64( %0, i64 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0 ; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret @@ -953,7 +953,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i64_i64( %0, i64 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vmadc.vxm v10, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -976,7 +976,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i64_i64( %0, i64 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vmadc.vxm v12, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: ret @@ -999,7 +999,7 @@ define @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i64_i64( %0, i64 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vmadc.vxm v16, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ -1016,7 +1016,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i8_i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmadc.vim v9, v8, 9, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: ret @@ -1033,7 +1033,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8: ; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmadc.vim v9, v8, 9, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: ret @@ -1050,7 +1050,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i8_i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmadc.vim v9, v8, 9, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: ret @@ -1067,7 +1067,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmadc.vim v9, v8, 9, v0 ; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret @@ -1084,7 +1084,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i8_i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmadc.vim v10, v8, 9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1101,7 +1101,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmadc.vim v12, v8, 9, v0 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: ret @@ -1118,7 +1118,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv64i1_nxv64i8_i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv64i1_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmadc.vim v16, v8, 9, v0 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ -1135,7 +1135,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmadc.vim v9, v8, 9, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: ret @@ -1152,7 +1152,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i16_i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmadc.vim v9, v8, 9, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: ret @@ -1169,7 +1169,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmadc.vim v9, v8, 9, v0 ; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret @@ -1186,7 +1186,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i16_i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmadc.vim v10, v8, 9, v0 ; 
CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1203,7 +1203,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmadc.vim v12, v8, 9, v0 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: ret @@ -1220,7 +1220,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i16_i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmadc.vim v16, v8, 9, v0 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ -1237,7 +1237,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmadc.vim v9, v8, 9, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: ret @@ -1254,7 +1254,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i32_i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmadc.vim v9, v8, 9, v0 ; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret @@ -1271,7 +1271,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmadc.vim v10, v8, 9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1288,7 +1288,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i32_i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmadc.vim v12, v8, 9, v0 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: ret @@ -1305,7 +1305,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmadc.vim v16, v8, 9, v0 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ -1322,7 +1322,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i64_i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmadc.vim v9, v8, 9, v0 ; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret @@ -1339,7 +1339,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i64_i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmadc.vim v10, v8, 9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -1356,7 +1356,7 @@ define 
@intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i64_i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmadc.vim v12, v8, 9, v0 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: ret @@ -1373,7 +1373,7 @@ define @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i64_i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmadc.vim v16, v8, 9, v0 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll @@ -11,7 +11,7 @@ define @intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vmadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define @intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vmadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -105,7 +105,7 @@ define @intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vmadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -152,7 +152,7 @@ define @intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vmadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -199,7 +199,7 @@ define @intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vmadd.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -246,7 +246,7 @@ define @intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vmadd.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -293,7 +293,7 @@ define @intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vmadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, 
e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vmadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vmadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vmadd.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vmadd.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vmadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vmadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -622,7 +622,7 @@ define @intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vmadd.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -669,7 +669,7 @@ define @intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vmadd.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -716,7 +716,7 @@ define @intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vmadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -763,7 +763,7 @@ define @intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vmadd.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -810,7 +810,7 @@ define @intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; 
CHECK-NEXT: vmadd.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -857,7 +857,7 @@ define @intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vmadd.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -904,7 +904,7 @@ define @intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma ; CHECK-NEXT: vmadd.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -951,7 +951,7 @@ define @intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma ; CHECK-NEXT: vmadd.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -998,7 +998,7 @@ define @intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma ; CHECK-NEXT: vmadd.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1045,7 +1045,7 @@ define @intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma ; CHECK-NEXT: vmadd.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -1092,7 +1092,7 @@ define @intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma ; CHECK-NEXT: vmadd.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -1139,7 +1139,7 @@ define @intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; CHECK-NEXT: vmadd.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1186,7 +1186,7 @@ define @intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; CHECK-NEXT: vmadd.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1233,7 +1233,7 @@ define @intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; CHECK-NEXT: vmadd.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1280,7 +1280,7 @@ define @intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma ; CHECK-NEXT: vmadd.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -1327,7 +1327,7 @@ define @intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, i32 %3) 
nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma ; CHECK-NEXT: vmadd.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -1374,7 +1374,7 @@ define @intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; CHECK-NEXT: vmadd.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1421,7 +1421,7 @@ define @intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; CHECK-NEXT: vmadd.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1468,7 +1468,7 @@ define @intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; CHECK-NEXT: vmadd.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -1515,7 +1515,7 @@ define @intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma ; CHECK-NEXT: vmadd.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -1566,9 +1566,9 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, ma ; CHECK-NEXT: vmadd.vv v8, v10, v9 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1596,7 +1596,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vmadd.vv v8, v10, v9, v0.t @@ -1627,9 +1627,9 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, ma ; CHECK-NEXT: vmadd.vv v8, v12, v10 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1657,7 +1657,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu ; CHECK-NEXT: vmadd.vv v8, v12, v10, v0.t @@ -1688,9 +1688,9 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, ma ; CHECK-NEXT: vmadd.vv v8, v16, v12 ; CHECK-NEXT: 
addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1718,7 +1718,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; CHECK-NEXT: vmadd.vv v8, v16, v12, v0.t diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll @@ -11,7 +11,7 @@ define @intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vmadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define @intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vmadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -105,7 +105,7 @@ define @intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vmadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -152,7 +152,7 @@ define @intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vmadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -199,7 +199,7 @@ define @intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vmadd.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -246,7 +246,7 @@ define @intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vmadd.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -293,7 +293,7 @@ define @intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vmadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vmadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; 
CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vmadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vmadd.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vmadd.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vmadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vmadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -622,7 +622,7 @@ define @intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vmadd.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -669,7 +669,7 @@ define @intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vmadd.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -716,7 +716,7 @@ define @intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vmadd.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -763,7 +763,7 @@ define @intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vmadd.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -810,7 +810,7 @@ define @intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vmadd.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -857,7 +857,7 @@ define @intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vmadd.vx v8, a0, v9 ; 
CHECK-NEXT: ret entry: @@ -904,7 +904,7 @@ define @intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma ; CHECK-NEXT: vmadd.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -951,7 +951,7 @@ define @intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma ; CHECK-NEXT: vmadd.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -998,7 +998,7 @@ define @intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma ; CHECK-NEXT: vmadd.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1045,7 +1045,7 @@ define @intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma ; CHECK-NEXT: vmadd.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -1092,7 +1092,7 @@ define @intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma ; CHECK-NEXT: vmadd.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -1139,7 +1139,7 @@ define @intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; CHECK-NEXT: vmadd.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1186,7 +1186,7 @@ define @intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; CHECK-NEXT: vmadd.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1233,7 +1233,7 @@ define @intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; CHECK-NEXT: vmadd.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1280,7 +1280,7 @@ define @intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma ; CHECK-NEXT: vmadd.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -1327,7 +1327,7 @@ define @intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma ; CHECK-NEXT: vmadd.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -1374,7 +1374,7 @@ define @intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, i64 %3) nounwind { ; 
CHECK-LABEL: intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; CHECK-NEXT: vmadd.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1421,7 +1421,7 @@ define @intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; CHECK-NEXT: vmadd.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1468,7 +1468,7 @@ define @intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; CHECK-NEXT: vmadd.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -1515,7 +1515,7 @@ define @intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma ; CHECK-NEXT: vmadd.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -1562,7 +1562,7 @@ define @intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, ma ; CHECK-NEXT: vmadd.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1609,7 +1609,7 @@ define @intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, ma ; CHECK-NEXT: vmadd.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -1656,7 +1656,7 @@ define @intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, ma ; CHECK-NEXT: vmadd.vx v8, a0, v12 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmadd-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmadd-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmadd-sdnode.ll @@ -10,7 +10,7 @@ define @vmadd_vv_nxv1i8( %va, %vb, %vc) { ; CHECK-LABEL: vmadd_vv_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %x = mul %va, %vb @@ -21,7 +21,7 @@ define @vmadd_vx_nxv1i8( %va, %vb, i8 %c) { ; CHECK-LABEL: vmadd_vx_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmadd.vx v8, a0, v9 ; CHECK-NEXT: ret %head = insertelement poison, i8 %c, i32 0 @@ -34,7 +34,7 @@ define @vmadd_vv_nxv2i8( %va, %vb, %vc) { ; CHECK-LABEL: vmadd_vv_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmadd.vv v8, v10, v9 ; CHECK-NEXT: ret %x = mul %va, %vc @@ -45,7 +45,7 @@ define @vmadd_vx_nxv2i8( %va, %vb, i8 %c) { ; CHECK-LABEL: vmadd_vx_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: 
vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmacc.vx v8, a0, v9 ; CHECK-NEXT: ret %head = insertelement poison, i8 %c, i32 0 @@ -58,7 +58,7 @@ define @vmadd_vv_nxv4i8( %va, %vb, %vc) { ; CHECK-LABEL: vmadd_vv_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %x = mul %vb, %va @@ -69,7 +69,7 @@ define @vmadd_vx_nxv4i8( %va, %vb, i8 %c) { ; CHECK-LABEL: vmadd_vx_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmadd.vx v8, a0, v9 ; CHECK-NEXT: ret %head = insertelement poison, i8 %c, i32 0 @@ -82,7 +82,7 @@ define @vmadd_vv_nxv8i8( %va, %vb, %vc) { ; CHECK-LABEL: vmadd_vv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmacc.vv v8, v10, v9 ; CHECK-NEXT: ret %x = mul %vb, %vc @@ -93,7 +93,7 @@ define @vmadd_vx_nxv8i8( %va, %vb, i8 %c) { ; CHECK-LABEL: vmadd_vx_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmacc.vx v8, a0, v9 ; CHECK-NEXT: ret %head = insertelement poison, i8 %c, i32 0 @@ -106,7 +106,7 @@ define @vmadd_vv_nxv16i8( %va, %vb, %vc) { ; CHECK-LABEL: vmadd_vv_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmadd.vv v8, v12, v10 ; CHECK-NEXT: ret %x = mul %vc, %va @@ -117,7 +117,7 @@ define @vmadd_vx_nxv16i8( %va, %vb, i8 %c) { ; CHECK-LABEL: vmadd_vx_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vmadd.vx v8, a0, v10 ; CHECK-NEXT: ret %head = insertelement poison, i8 %c, i32 0 @@ -130,7 +130,7 @@ define @vmadd_vv_nxv32i8( %va, %vb, %vc) { ; CHECK-LABEL: vmadd_vv_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vmacc.vv v8, v16, v12 ; CHECK-NEXT: ret %x = mul %vc, %vb @@ -141,7 +141,7 @@ define @vmadd_vx_nxv32i8( %va, %vb, i8 %c) { ; CHECK-LABEL: vmadd_vx_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vmacc.vx v8, a0, v12 ; CHECK-NEXT: ret %head = insertelement poison, i8 %c, i32 0 @@ -155,7 +155,7 @@ ; CHECK-LABEL: vmadd_vv_nxv64i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vmacc.vv v8, v16, v24 ; CHECK-NEXT: ret %x = mul %vc, %vb @@ -166,7 +166,7 @@ define @vmadd_vx_nxv64i8( %va, %vb, i8 %c) { ; CHECK-LABEL: vmadd_vx_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vmacc.vx v8, a0, v16 ; CHECK-NEXT: ret %head = insertelement poison, i8 %c, i32 0 @@ -179,7 +179,7 @@ define @vmadd_vv_nxv1i16( %va, %vb, %vc) { ; CHECK-LABEL: vmadd_vv_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %x = mul %va, %vb @@ -190,7 +190,7 @@ define @vmadd_vx_nxv1i16( %va, %vb, i16 %c) { ; CHECK-LABEL: vmadd_vx_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; 
CHECK-NEXT: vmadd.vx v8, a0, v9 ; CHECK-NEXT: ret %head = insertelement poison, i16 %c, i32 0 @@ -203,7 +203,7 @@ define @vmadd_vv_nxv2i16( %va, %vb, %vc) { ; CHECK-LABEL: vmadd_vv_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmadd.vv v8, v10, v9 ; CHECK-NEXT: ret %x = mul %va, %vc @@ -214,7 +214,7 @@ define @vmadd_vx_nxv2i16( %va, %vb, i16 %c) { ; CHECK-LABEL: vmadd_vx_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmacc.vx v8, a0, v9 ; CHECK-NEXT: ret %head = insertelement poison, i16 %c, i32 0 @@ -227,7 +227,7 @@ define @vmadd_vv_nxv4i16( %va, %vb, %vc) { ; CHECK-LABEL: vmadd_vv_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %x = mul %vb, %va @@ -238,7 +238,7 @@ define @vmadd_vx_nxv4i16( %va, %vb, i16 %c) { ; CHECK-LABEL: vmadd_vx_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vmadd.vx v8, a0, v9 ; CHECK-NEXT: ret %head = insertelement poison, i16 %c, i32 0 @@ -251,7 +251,7 @@ define @vmadd_vv_nxv8i16( %va, %vb, %vc) { ; CHECK-LABEL: vmadd_vv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmacc.vv v8, v12, v10 ; CHECK-NEXT: ret %x = mul %vb, %vc @@ -262,7 +262,7 @@ define @vmadd_vx_nxv8i16( %va, %vb, i16 %c) { ; CHECK-LABEL: vmadd_vx_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vmacc.vx v8, a0, v10 ; CHECK-NEXT: ret %head = insertelement poison, i16 %c, i32 0 @@ -275,7 +275,7 @@ define @vmadd_vv_nxv16i16( %va, %vb, %vc) { ; CHECK-LABEL: vmadd_vv_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vmadd.vv v8, v16, v12 ; CHECK-NEXT: ret %x = mul %vc, %va @@ -286,7 +286,7 @@ define @vmadd_vx_nxv16i16( %va, %vb, i16 %c) { ; CHECK-LABEL: vmadd_vx_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vmadd.vx v8, a0, v12 ; CHECK-NEXT: ret %head = insertelement poison, i16 %c, i32 0 @@ -300,7 +300,7 @@ ; CHECK-LABEL: vmadd_vv_nxv32i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vmacc.vv v8, v16, v24 ; CHECK-NEXT: ret %x = mul %vc, %vb @@ -311,7 +311,7 @@ define @vmadd_vx_nxv32i16( %va, %vb, i16 %c) { ; CHECK-LABEL: vmadd_vx_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vmacc.vx v8, a0, v16 ; CHECK-NEXT: ret %head = insertelement poison, i16 %c, i32 0 @@ -324,7 +324,7 @@ define @vmadd_vv_nxv1i32( %va, %vb, %vc) { ; CHECK-LABEL: vmadd_vv_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %x = mul %va, %vb @@ -335,7 +335,7 @@ define @vmadd_vx_nxv1i32( %va, %vb, i32 %c) { ; CHECK-LABEL: vmadd_vx_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, 
zero, e32, mf2, ta, ma ; CHECK-NEXT: vmadd.vx v8, a0, v9 ; CHECK-NEXT: ret %head = insertelement poison, i32 %c, i32 0 @@ -348,7 +348,7 @@ define @vmadd_vv_nxv2i32( %va, %vb, %vc) { ; CHECK-LABEL: vmadd_vv_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vmadd.vv v8, v10, v9 ; CHECK-NEXT: ret %x = mul %va, %vc @@ -359,7 +359,7 @@ define @vmadd_vx_nxv2i32( %va, %vb, i32 %c) { ; CHECK-LABEL: vmadd_vx_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vmacc.vx v8, a0, v9 ; CHECK-NEXT: ret %head = insertelement poison, i32 %c, i32 0 @@ -372,7 +372,7 @@ define @vmadd_vv_nxv4i32( %va, %vb, %vc) { ; CHECK-LABEL: vmadd_vv_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vmadd.vv v8, v10, v12 ; CHECK-NEXT: ret %x = mul %vb, %va @@ -383,7 +383,7 @@ define @vmadd_vx_nxv4i32( %va, %vb, i32 %c) { ; CHECK-LABEL: vmadd_vx_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vmadd.vx v8, a0, v10 ; CHECK-NEXT: ret %head = insertelement poison, i32 %c, i32 0 @@ -396,7 +396,7 @@ define @vmadd_vv_nxv8i32( %va, %vb, %vc) { ; CHECK-LABEL: vmadd_vv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmacc.vv v8, v16, v12 ; CHECK-NEXT: ret %x = mul %vb, %vc @@ -407,7 +407,7 @@ define @vmadd_vx_nxv8i32( %va, %vb, i32 %c) { ; CHECK-LABEL: vmadd_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vmacc.vx v8, a0, v12 ; CHECK-NEXT: ret %head = insertelement poison, i32 %c, i32 0 @@ -421,7 +421,7 @@ ; CHECK-LABEL: vmadd_vv_nxv16i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vmadd.vv v8, v24, v16 ; CHECK-NEXT: ret %x = mul %vc, %va @@ -432,7 +432,7 @@ define @vmadd_vx_nxv16i32( %va, %vb, i32 %c) { ; CHECK-LABEL: vmadd_vx_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vmadd.vx v8, a0, v16 ; CHECK-NEXT: ret %head = insertelement poison, i32 %c, i32 0 @@ -445,7 +445,7 @@ define @vmadd_vv_nxv1i64( %va, %vb, %vc) { ; CHECK-LABEL: vmadd_vv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vmadd.vv v8, v9, v10 ; CHECK-NEXT: ret %x = mul %va, %vb @@ -461,7 +461,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vmadd.vv v8, v10, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -469,7 +469,7 @@ ; ; RV64-LABEL: vmadd_vx_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV64-NEXT: vmadd.vx v8, a0, v9 ; RV64-NEXT: ret %head = insertelement poison, i64 %c, i32 0 @@ -482,7 +482,7 @@ define @vmadd_vv_nxv2i64( %va, %vb, %vc) { ; CHECK-LABEL: vmadd_vv_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; 
CHECK-NEXT: vmadd.vv v8, v12, v10 ; CHECK-NEXT: ret %x = mul %va, %vc @@ -498,7 +498,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vmacc.vv v8, v10, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -506,7 +506,7 @@ ; ; RV64-LABEL: vmadd_vx_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV64-NEXT: vmacc.vx v8, a0, v10 ; RV64-NEXT: ret %head = insertelement poison, i64 %c, i32 0 @@ -519,7 +519,7 @@ define @vmadd_vv_nxv4i64( %va, %vb, %vc) { ; CHECK-LABEL: vmadd_vv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vmadd.vv v8, v12, v16 ; CHECK-NEXT: ret %x = mul %vb, %va @@ -535,7 +535,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmadd.vv v8, v16, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -543,7 +543,7 @@ ; ; RV64-LABEL: vmadd_vx_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV64-NEXT: vmadd.vx v8, a0, v12 ; RV64-NEXT: ret %head = insertelement poison, i64 %c, i32 0 @@ -557,7 +557,7 @@ ; CHECK-LABEL: vmadd_vv_nxv8i64: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmacc.vv v8, v16, v24 ; CHECK-NEXT: ret %x = mul %vb, %vc @@ -573,7 +573,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vmacc.vv v8, v16, v24 ; RV32-NEXT: addi sp, sp, 16 @@ -581,7 +581,7 @@ ; ; RV64-LABEL: vmadd_vx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vmacc.vx v8, a0, v16 ; RV64-NEXT: ret %head = insertelement poison, i64 %c, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmand.ll b/llvm/test/CodeGen/RISCV/rvv/vmand.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmand.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmand.ll @@ -11,7 +11,7 @@ define @intrinsic_vmand_mm_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmand_mm_nxv1i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmand.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -31,7 +31,7 @@ define @intrinsic_vmand_mm_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmand_mm_nxv2i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmand.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -51,7 +51,7 @@ define @intrinsic_vmand_mm_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmand_mm_nxv4i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmand.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -71,7 +71,7 @@ define @intrinsic_vmand_mm_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmand_mm_nxv8i1: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmand.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -91,7 +91,7 @@ define @intrinsic_vmand_mm_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmand_mm_nxv16i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmand.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -111,7 +111,7 @@ define @intrinsic_vmand_mm_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmand_mm_nxv32i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmand.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -131,7 +131,7 @@ define @intrinsic_vmand_mm_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmand_mm_nxv64i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmand.mm v0, v0, v8 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmandn.ll b/llvm/test/CodeGen/RISCV/rvv/vmandn.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmandn.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmandn.ll @@ -11,7 +11,7 @@ define @intrinsic_vmandn_mm_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmandn_mm_nxv1i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -31,7 +31,7 @@ define @intrinsic_vmandn_mm_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmandn_mm_nxv2i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -51,7 +51,7 @@ define @intrinsic_vmandn_mm_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmandn_mm_nxv4i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -71,7 +71,7 @@ define @intrinsic_vmandn_mm_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmandn_mm_nxv8i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -91,7 +91,7 @@ define @intrinsic_vmandn_mm_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmandn_mm_nxv16i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -111,7 +111,7 @@ define @intrinsic_vmandn_mm_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmandn_mm_nxv32i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -131,7 +131,7 @@ define @intrinsic_vmandn_mm_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmandn_mm_nxv64i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmarith-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmarith-sdnode.ll 
--- a/llvm/test/CodeGen/RISCV/rvv/vmarith-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmarith-sdnode.ll @@ -5,7 +5,7 @@ define @vmand_vv_nxv1i1( %va, %vb) { ; CHECK-LABEL: vmand_vv_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmand.mm v0, v0, v8 ; CHECK-NEXT: ret %vc = and %va, %vb @@ -15,7 +15,7 @@ define @vmand_vv_nxv2i1( %va, %vb) { ; CHECK-LABEL: vmand_vv_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmand.mm v0, v0, v8 ; CHECK-NEXT: ret %vc = and %va, %vb @@ -25,7 +25,7 @@ define @vmand_vv_nxv4i1( %va, %vb) { ; CHECK-LABEL: vmand_vv_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmand.mm v0, v0, v8 ; CHECK-NEXT: ret %vc = and %va, %vb @@ -35,7 +35,7 @@ define @vmand_vv_nxv8i1( %va, %vb) { ; CHECK-LABEL: vmand_vv_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmand.mm v0, v0, v8 ; CHECK-NEXT: ret %vc = and %va, %vb @@ -45,7 +45,7 @@ define @vmand_vv_nxv16i1( %va, %vb) { ; CHECK-LABEL: vmand_vv_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmand.mm v0, v0, v8 ; CHECK-NEXT: ret %vc = and %va, %vb @@ -55,7 +55,7 @@ define @vmor_vv_nxv1i1( %va, %vb) { ; CHECK-LABEL: vmor_vv_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmor.mm v0, v0, v8 ; CHECK-NEXT: ret %vc = or %va, %vb @@ -65,7 +65,7 @@ define @vmor_vv_nxv2i1( %va, %vb) { ; CHECK-LABEL: vmor_vv_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmor.mm v0, v0, v8 ; CHECK-NEXT: ret %vc = or %va, %vb @@ -75,7 +75,7 @@ define @vmor_vv_nxv4i1( %va, %vb) { ; CHECK-LABEL: vmor_vv_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmor.mm v0, v0, v8 ; CHECK-NEXT: ret %vc = or %va, %vb @@ -85,7 +85,7 @@ define @vmor_vv_nxv8i1( %va, %vb) { ; CHECK-LABEL: vmor_vv_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmor.mm v0, v0, v8 ; CHECK-NEXT: ret %vc = or %va, %vb @@ -95,7 +95,7 @@ define @vmor_vv_nxv16i1( %va, %vb) { ; CHECK-LABEL: vmor_vv_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmor.mm v0, v0, v8 ; CHECK-NEXT: ret %vc = or %va, %vb @@ -105,7 +105,7 @@ define @vmxor_vv_nxv1i1( %va, %vb) { ; CHECK-LABEL: vmxor_vv_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %vc = xor %va, %vb @@ -115,7 +115,7 @@ define @vmxor_vv_nxv2i1( %va, %vb) { ; CHECK-LABEL: vmxor_vv_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %vc = xor %va, %vb @@ -125,7 +125,7 @@ define @vmxor_vv_nxv4i1( %va, %vb) { ; CHECK-LABEL: vmxor_vv_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, 
zero, e8, mf2, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %vc = xor %va, %vb @@ -135,7 +135,7 @@ define @vmxor_vv_nxv8i1( %va, %vb) { ; CHECK-LABEL: vmxor_vv_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %vc = xor %va, %vb @@ -145,7 +145,7 @@ define @vmxor_vv_nxv16i1( %va, %vb) { ; CHECK-LABEL: vmxor_vv_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret %vc = xor %va, %vb @@ -155,7 +155,7 @@ define @vmnand_vv_nxv1i1( %va, %vb) { ; CHECK-LABEL: vmnand_vv_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmnand.mm v0, v0, v8 ; CHECK-NEXT: ret %vc = and %va, %vb @@ -168,7 +168,7 @@ define @vmnand_vv_nxv2i1( %va, %vb) { ; CHECK-LABEL: vmnand_vv_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmnand.mm v0, v0, v8 ; CHECK-NEXT: ret %vc = and %va, %vb @@ -181,7 +181,7 @@ define @vmnand_vv_nxv4i1( %va, %vb) { ; CHECK-LABEL: vmnand_vv_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmnand.mm v0, v0, v8 ; CHECK-NEXT: ret %vc = and %va, %vb @@ -194,7 +194,7 @@ define @vmnand_vv_nxv8i1( %va, %vb) { ; CHECK-LABEL: vmnand_vv_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmnand.mm v0, v0, v8 ; CHECK-NEXT: ret %vc = and %va, %vb @@ -207,7 +207,7 @@ define @vmnand_vv_nxv16i1( %va, %vb) { ; CHECK-LABEL: vmnand_vv_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmnand.mm v0, v0, v8 ; CHECK-NEXT: ret %vc = and %va, %vb @@ -220,7 +220,7 @@ define @vmnor_vv_nxv1i1( %va, %vb) { ; CHECK-LABEL: vmnor_vv_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmnor.mm v0, v0, v8 ; CHECK-NEXT: ret %vc = or %va, %vb @@ -233,7 +233,7 @@ define @vmnor_vv_nxv2i1( %va, %vb) { ; CHECK-LABEL: vmnor_vv_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmnor.mm v0, v0, v8 ; CHECK-NEXT: ret %vc = or %va, %vb @@ -246,7 +246,7 @@ define @vmnor_vv_nxv4i1( %va, %vb) { ; CHECK-LABEL: vmnor_vv_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmnor.mm v0, v0, v8 ; CHECK-NEXT: ret %vc = or %va, %vb @@ -259,7 +259,7 @@ define @vmnor_vv_nxv8i1( %va, %vb) { ; CHECK-LABEL: vmnor_vv_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmnor.mm v0, v0, v8 ; CHECK-NEXT: ret %vc = or %va, %vb @@ -272,7 +272,7 @@ define @vmnor_vv_nxv16i1( %va, %vb) { ; CHECK-LABEL: vmnor_vv_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmnor.mm v0, v0, v8 ; CHECK-NEXT: ret %vc = or %va, %vb @@ -285,7 +285,7 @@ define @vmxnor_vv_nxv1i1( %va, %vb) { ; CHECK-LABEL: vmxnor_vv_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, 
mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret %vc = xor %va, %vb @@ -298,7 +298,7 @@ define @vmxnor_vv_nxv2i1( %va, %vb) { ; CHECK-LABEL: vmxnor_vv_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret %vc = xor %va, %vb @@ -311,7 +311,7 @@ define @vmxnor_vv_nxv4i1( %va, %vb) { ; CHECK-LABEL: vmxnor_vv_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret %vc = xor %va, %vb @@ -324,7 +324,7 @@ define @vmxnor_vv_nxv8i1( %va, %vb) { ; CHECK-LABEL: vmxnor_vv_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret %vc = xor %va, %vb @@ -337,7 +337,7 @@ define @vmxnor_vv_nxv16i1( %va, %vb) { ; CHECK-LABEL: vmxnor_vv_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret %vc = xor %va, %vb @@ -350,7 +350,7 @@ define @vmandn_vv_nxv1i1( %va, %vb) { ; CHECK-LABEL: vmandn_vv_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 1, i32 0 @@ -363,7 +363,7 @@ define @vmandn_vv_nxv2i1( %va, %vb) { ; CHECK-LABEL: vmandn_vv_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 1, i32 0 @@ -376,7 +376,7 @@ define @vmandn_vv_nxv4i1( %va, %vb) { ; CHECK-LABEL: vmandn_vv_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 1, i32 0 @@ -389,7 +389,7 @@ define @vmandn_vv_nxv8i1( %va, %vb) { ; CHECK-LABEL: vmandn_vv_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 1, i32 0 @@ -402,7 +402,7 @@ define @vmandn_vv_nxv16i1( %va, %vb) { ; CHECK-LABEL: vmandn_vv_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 1, i32 0 @@ -415,7 +415,7 @@ define @vmorn_vv_nxv1i1( %va, %vb) { ; CHECK-LABEL: vmorn_vv_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmorn.mm v0, v0, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 1, i32 0 @@ -428,7 +428,7 @@ define @vmorn_vv_nxv2i1( %va, %vb) { ; CHECK-LABEL: vmorn_vv_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmorn.mm v0, v0, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 1, i32 0 @@ -441,7 +441,7 @@ define @vmorn_vv_nxv4i1( %va, %vb) { ; CHECK-LABEL: vmorn_vv_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, 
ma ; CHECK-NEXT: vmorn.mm v0, v0, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 1, i32 0 @@ -454,7 +454,7 @@ define @vmorn_vv_nxv8i1( %va, %vb) { ; CHECK-LABEL: vmorn_vv_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmorn.mm v0, v0, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 1, i32 0 @@ -467,7 +467,7 @@ define @vmorn_vv_nxv16i1( %va, %vb) { ; CHECK-LABEL: vmorn_vv_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmorn.mm v0, v0, v8 ; CHECK-NEXT: ret %head = insertelement poison, i1 1, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vmax_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vmax_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vmax_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vmax_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vmax_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vmax_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vmax_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: 
ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vmax_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vmax_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vmax_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vmax_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vmax_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vmax_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vmax_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vmax_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vmax_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vmax_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vmax_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmax_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vmax_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vmax_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vmax_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vmax_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1095,7 +1095,7 @@ define @intrinsic_vmax_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vmax_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vmax_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vmax_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define @intrinsic_vmax_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vmax_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret 
entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vmax_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vmax_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vmax_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define @intrinsic_vmax_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vmax_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vmax_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vmax_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vmax_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ define @intrinsic_vmax_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ define @intrinsic_vmax_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vmax_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv16i32_nxv16i32_i32: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1898,7 +1898,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1957,7 +1957,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmax.vv v8, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -2016,7 +2016,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmax.vv v8, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -2075,7 +2075,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmax.vv v8, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vmax_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vmax_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vmax_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vmax_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vmax_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: 
vmax.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vmax_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vmax_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vmax_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vmax_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vmax_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vmax_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vmax_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vmax_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vmax_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vmax_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vmax_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { 
; CHECK-LABEL: intrinsic_vmax_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vmax_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vmax_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vmax_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vmax_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vmax_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vmax_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1095,7 +1095,7 @@ define @intrinsic_vmax_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vmax_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vmax_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vmax_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: 
vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define @intrinsic_vmax_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vmax_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vmax_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vmax_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vmax_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define @intrinsic_vmax_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vmax_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vmax_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vmax_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vmax_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ define @intrinsic_vmax_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmax_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ define @intrinsic_vmax_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vmax_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1894,7 +1894,7 @@ define @intrinsic_vmax_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1941,7 +1941,7 @@ define @intrinsic_vmax_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1988,7 +1988,7 @@ define @intrinsic_vmax_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2035,7 +2035,7 @@ define @intrinsic_vmax_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmax_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmax-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmax-sdnode.ll @@ -5,7 +5,7 @@ define @vmax_vv_nxv1i8( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb @@ -16,7 +16,7 @@ define @vmax_vx_nxv1i8( %va, i8 signext %b) { ; CHECK-LABEL: vmax_vx_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -30,7 +30,7 @@ ; CHECK-LABEL: vmax_vi_nxv1i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 -3, i32 0 @@ -43,7 +43,7 @@ define @vmax_vv_nxv2i8( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v9 ; 
CHECK-NEXT: ret %cmp = icmp sgt %va, %vb @@ -54,7 +54,7 @@ define @vmax_vx_nxv2i8( %va, i8 signext %b) { ; CHECK-LABEL: vmax_vx_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -68,7 +68,7 @@ ; CHECK-LABEL: vmax_vi_nxv2i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 -3, i32 0 @@ -81,7 +81,7 @@ define @vmax_vv_nxv4i8( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb @@ -92,7 +92,7 @@ define @vmax_vx_nxv4i8( %va, i8 signext %b) { ; CHECK-LABEL: vmax_vx_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -106,7 +106,7 @@ ; CHECK-LABEL: vmax_vi_nxv4i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 -3, i32 0 @@ -119,7 +119,7 @@ define @vmax_vv_nxv8i8( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb @@ -130,7 +130,7 @@ define @vmax_vx_nxv8i8( %va, i8 signext %b) { ; CHECK-LABEL: vmax_vx_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -144,7 +144,7 @@ ; CHECK-LABEL: vmax_vi_nxv8i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 -3, i32 0 @@ -157,7 +157,7 @@ define @vmax_vv_nxv16i8( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v10 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb @@ -168,7 +168,7 @@ define @vmax_vx_nxv16i8( %va, i8 signext %b) { ; CHECK-LABEL: vmax_vx_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -182,7 +182,7 @@ ; CHECK-LABEL: vmax_vi_nxv16i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 -3, i32 0 @@ -195,7 +195,7 @@ define @vmax_vv_nxv32i8( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v12 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb @@ -206,7 +206,7 @@ define 
@vmax_vx_nxv32i8( %va, i8 signext %b) { ; CHECK-LABEL: vmax_vx_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -220,7 +220,7 @@ ; CHECK-LABEL: vmax_vi_nxv32i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 -3, i32 0 @@ -233,7 +233,7 @@ define @vmax_vv_nxv64i8( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v16 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb @@ -244,7 +244,7 @@ define @vmax_vx_nxv64i8( %va, i8 signext %b) { ; CHECK-LABEL: vmax_vx_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -258,7 +258,7 @@ ; CHECK-LABEL: vmax_vi_nxv64i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 -3, i32 0 @@ -271,7 +271,7 @@ define @vmax_vv_nxv1i16( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb @@ -282,7 +282,7 @@ define @vmax_vx_nxv1i16( %va, i16 signext %b) { ; CHECK-LABEL: vmax_vx_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -296,7 +296,7 @@ ; CHECK-LABEL: vmax_vi_nxv1i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 -3, i32 0 @@ -309,7 +309,7 @@ define @vmax_vv_nxv2i16( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb @@ -320,7 +320,7 @@ define @vmax_vx_nxv2i16( %va, i16 signext %b) { ; CHECK-LABEL: vmax_vx_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -334,7 +334,7 @@ ; CHECK-LABEL: vmax_vi_nxv2i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 -3, i32 0 @@ -347,7 +347,7 @@ define @vmax_vv_nxv4i16( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb @@ -358,7 +358,7 @@ define @vmax_vx_nxv4i16( %va, i16 
signext %b) { ; CHECK-LABEL: vmax_vx_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -372,7 +372,7 @@ ; CHECK-LABEL: vmax_vi_nxv4i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 -3, i32 0 @@ -385,7 +385,7 @@ define @vmax_vv_nxv8i16( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v10 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb @@ -396,7 +396,7 @@ define @vmax_vx_nxv8i16( %va, i16 signext %b) { ; CHECK-LABEL: vmax_vx_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -410,7 +410,7 @@ ; CHECK-LABEL: vmax_vi_nxv8i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 -3, i32 0 @@ -423,7 +423,7 @@ define @vmax_vv_nxv16i16( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v12 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb @@ -434,7 +434,7 @@ define @vmax_vx_nxv16i16( %va, i16 signext %b) { ; CHECK-LABEL: vmax_vx_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -448,7 +448,7 @@ ; CHECK-LABEL: vmax_vi_nxv16i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 -3, i32 0 @@ -461,7 +461,7 @@ define @vmax_vv_nxv32i16( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v16 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb @@ -472,7 +472,7 @@ define @vmax_vx_nxv32i16( %va, i16 signext %b) { ; CHECK-LABEL: vmax_vx_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -486,7 +486,7 @@ ; CHECK-LABEL: vmax_vi_nxv32i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 -3, i32 0 @@ -499,7 +499,7 @@ define @vmax_vv_nxv1i32( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb @@ -510,7 +510,7 @@ define @vmax_vx_nxv1i32( %va, i32 signext %b) { ; 
CHECK-LABEL: vmax_vx_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -524,7 +524,7 @@ ; CHECK-LABEL: vmax_vi_nxv1i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 -3, i32 0 @@ -537,7 +537,7 @@ define @vmax_vv_nxv2i32( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb @@ -548,7 +548,7 @@ define @vmax_vx_nxv2i32( %va, i32 signext %b) { ; CHECK-LABEL: vmax_vx_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -562,7 +562,7 @@ ; CHECK-LABEL: vmax_vi_nxv2i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 -3, i32 0 @@ -575,7 +575,7 @@ define @vmax_vv_nxv4i32( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v10 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb @@ -586,7 +586,7 @@ define @vmax_vx_nxv4i32( %va, i32 signext %b) { ; CHECK-LABEL: vmax_vx_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -600,7 +600,7 @@ ; CHECK-LABEL: vmax_vi_nxv4i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 -3, i32 0 @@ -613,7 +613,7 @@ define @vmax_vv_nxv8i32( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v12 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb @@ -624,7 +624,7 @@ define @vmax_vx_nxv8i32( %va, i32 signext %b) { ; CHECK-LABEL: vmax_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -638,7 +638,7 @@ ; CHECK-LABEL: vmax_vi_nxv8i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 -3, i32 0 @@ -651,7 +651,7 @@ define @vmax_vv_nxv16i32( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v16 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb @@ -662,7 +662,7 @@ define @vmax_vx_nxv16i32( %va, i32 signext %b) { ; CHECK-LABEL: 
vmax_vx_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -676,7 +676,7 @@ ; CHECK-LABEL: vmax_vi_nxv16i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 -3, i32 0 @@ -689,7 +689,7 @@ define @vmax_vv_nxv1i64( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb @@ -705,7 +705,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vmax.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -713,7 +713,7 @@ ; ; RV64-LABEL: vmax_vx_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV64-NEXT: vmax.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -727,7 +727,7 @@ ; CHECK-LABEL: vmax_vi_nxv1i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 -3, i32 0 @@ -740,7 +740,7 @@ define @vmax_vv_nxv2i64( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v10 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb @@ -756,7 +756,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vmax.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -764,7 +764,7 @@ ; ; RV64-LABEL: vmax_vx_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV64-NEXT: vmax.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -778,7 +778,7 @@ ; CHECK-LABEL: vmax_vi_nxv2i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 -3, i32 0 @@ -791,7 +791,7 @@ define @vmax_vv_nxv4i64( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v12 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb @@ -807,7 +807,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vmax.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -815,7 +815,7 @@ ; ; RV64-LABEL: vmax_vx_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV64-NEXT: vmax.vx v8, v8, a0 ; 
RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -829,7 +829,7 @@ ; CHECK-LABEL: vmax_vi_nxv4i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 -3, i32 0 @@ -842,7 +842,7 @@ define @vmax_vv_nxv8i64( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmax.vv v8, v8, v16 ; CHECK-NEXT: ret %cmp = icmp sgt %va, %vb @@ -858,7 +858,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmax.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -866,7 +866,7 @@ ; ; RV64-LABEL: vmax_vx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vmax.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -880,7 +880,7 @@ ; CHECK-LABEL: vmax_vi_nxv8i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; CHECK-NEXT: vmax.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 -3, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vmaxu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vmaxu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vmaxu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vmaxu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vmaxu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vmaxu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vmaxu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vmaxu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vmaxu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vmaxu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vmaxu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vmaxu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vmaxu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vmaxu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vmaxu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, 
v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vmaxu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vmaxu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vmaxu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vmaxu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vmaxu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vmaxu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vmaxu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1095,7 +1095,7 @@ define @intrinsic_vmaxu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vmaxu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vmaxu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vmaxu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmaxu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define @intrinsic_vmaxu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vmaxu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vmaxu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vmaxu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vmaxu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define @intrinsic_vmaxu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vmaxu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vmaxu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vmaxu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vmaxu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, 
m1, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ define @intrinsic_vmaxu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ define @intrinsic_vmaxu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vmaxu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1898,7 +1898,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vmaxu.vv v8, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1957,7 +1957,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmaxu.vv v8, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -2016,7 +2016,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmaxu.vv v8, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -2075,7 +2075,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmaxu.vv v8, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vmaxu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vmaxu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define 
@intrinsic_vmaxu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vmaxu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vmaxu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vmaxu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vmaxu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vmaxu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vmaxu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vmaxu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vmaxu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vmaxu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vmaxu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmaxu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vmaxu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vmaxu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vmaxu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vmaxu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vmaxu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vmaxu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vmaxu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vmaxu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vmaxu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1095,7 +1095,7 @@ define @intrinsic_vmaxu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: 
vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vmaxu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vmaxu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vmaxu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define @intrinsic_vmaxu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vmaxu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vmaxu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vmaxu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vmaxu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define @intrinsic_vmaxu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vmaxu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vmaxu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, 
i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vmaxu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vmaxu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ define @intrinsic_vmaxu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ define @intrinsic_vmaxu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vmaxu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1894,7 +1894,7 @@ define @intrinsic_vmaxu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1941,7 +1941,7 @@ define @intrinsic_vmaxu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1988,7 +1988,7 @@ define @intrinsic_vmaxu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2035,7 +2035,7 @@ define @intrinsic_vmaxu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-sdnode.ll @@ -5,7 +5,7 @@ define 
@vmax_vv_nxv1i8( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb @@ -16,7 +16,7 @@ define @vmax_vx_nxv1i8( %va, i8 signext %b) { ; CHECK-LABEL: vmax_vx_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -30,7 +30,7 @@ ; CHECK-LABEL: vmax_vi_nxv1i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 -3, i32 0 @@ -43,7 +43,7 @@ define @vmax_vv_nxv2i8( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb @@ -54,7 +54,7 @@ define @vmax_vx_nxv2i8( %va, i8 signext %b) { ; CHECK-LABEL: vmax_vx_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -68,7 +68,7 @@ ; CHECK-LABEL: vmax_vi_nxv2i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 -3, i32 0 @@ -81,7 +81,7 @@ define @vmax_vv_nxv4i8( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb @@ -92,7 +92,7 @@ define @vmax_vx_nxv4i8( %va, i8 signext %b) { ; CHECK-LABEL: vmax_vx_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -106,7 +106,7 @@ ; CHECK-LABEL: vmax_vi_nxv4i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 -3, i32 0 @@ -119,7 +119,7 @@ define @vmax_vv_nxv8i8( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb @@ -130,7 +130,7 @@ define @vmax_vx_nxv8i8( %va, i8 signext %b) { ; CHECK-LABEL: vmax_vx_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -144,7 +144,7 @@ ; CHECK-LABEL: vmax_vi_nxv8i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 -3, i32 0 @@ -157,7 +157,7 @@ define @vmax_vv_nxv16i8( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv16i8: ; CHECK: 
# %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v10 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb @@ -168,7 +168,7 @@ define @vmax_vx_nxv16i8( %va, i8 signext %b) { ; CHECK-LABEL: vmax_vx_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -182,7 +182,7 @@ ; CHECK-LABEL: vmax_vi_nxv16i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 -3, i32 0 @@ -195,7 +195,7 @@ define @vmax_vv_nxv32i8( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v12 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb @@ -206,7 +206,7 @@ define @vmax_vx_nxv32i8( %va, i8 signext %b) { ; CHECK-LABEL: vmax_vx_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -220,7 +220,7 @@ ; CHECK-LABEL: vmax_vi_nxv32i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 -3, i32 0 @@ -233,7 +233,7 @@ define @vmax_vv_nxv64i8( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v16 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb @@ -244,7 +244,7 @@ define @vmax_vx_nxv64i8( %va, i8 signext %b) { ; CHECK-LABEL: vmax_vx_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -258,7 +258,7 @@ ; CHECK-LABEL: vmax_vi_nxv64i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 -3, i32 0 @@ -271,7 +271,7 @@ define @vmax_vv_nxv1i16( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb @@ -282,7 +282,7 @@ define @vmax_vx_nxv1i16( %va, i16 signext %b) { ; CHECK-LABEL: vmax_vx_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -296,7 +296,7 @@ ; CHECK-LABEL: vmax_vi_nxv1i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 -3, i32 0 @@ -309,7 +309,7 @@ define @vmax_vv_nxv2i16( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, 
zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb @@ -320,7 +320,7 @@ define @vmax_vx_nxv2i16( %va, i16 signext %b) { ; CHECK-LABEL: vmax_vx_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -334,7 +334,7 @@ ; CHECK-LABEL: vmax_vi_nxv2i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 -3, i32 0 @@ -347,7 +347,7 @@ define @vmax_vv_nxv4i16( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb @@ -358,7 +358,7 @@ define @vmax_vx_nxv4i16( %va, i16 signext %b) { ; CHECK-LABEL: vmax_vx_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -372,7 +372,7 @@ ; CHECK-LABEL: vmax_vi_nxv4i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 -3, i32 0 @@ -385,7 +385,7 @@ define @vmax_vv_nxv8i16( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v10 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb @@ -396,7 +396,7 @@ define @vmax_vx_nxv8i16( %va, i16 signext %b) { ; CHECK-LABEL: vmax_vx_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -410,7 +410,7 @@ ; CHECK-LABEL: vmax_vi_nxv8i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 -3, i32 0 @@ -423,7 +423,7 @@ define @vmax_vv_nxv16i16( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v12 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb @@ -434,7 +434,7 @@ define @vmax_vx_nxv16i16( %va, i16 signext %b) { ; CHECK-LABEL: vmax_vx_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -448,7 +448,7 @@ ; CHECK-LABEL: vmax_vi_nxv16i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 -3, i32 0 @@ -461,7 +461,7 @@ define @vmax_vv_nxv32i16( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, 
zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v16 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb @@ -472,7 +472,7 @@ define @vmax_vx_nxv32i16( %va, i16 signext %b) { ; CHECK-LABEL: vmax_vx_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -486,7 +486,7 @@ ; CHECK-LABEL: vmax_vi_nxv32i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 -3, i32 0 @@ -499,7 +499,7 @@ define @vmax_vv_nxv1i32( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb @@ -510,7 +510,7 @@ define @vmax_vx_nxv1i32( %va, i32 signext %b) { ; CHECK-LABEL: vmax_vx_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -524,7 +524,7 @@ ; CHECK-LABEL: vmax_vi_nxv1i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 -3, i32 0 @@ -537,7 +537,7 @@ define @vmax_vv_nxv2i32( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb @@ -548,7 +548,7 @@ define @vmax_vx_nxv2i32( %va, i32 signext %b) { ; CHECK-LABEL: vmax_vx_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -562,7 +562,7 @@ ; CHECK-LABEL: vmax_vi_nxv2i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 -3, i32 0 @@ -575,7 +575,7 @@ define @vmax_vv_nxv4i32( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v10 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb @@ -586,7 +586,7 @@ define @vmax_vx_nxv4i32( %va, i32 signext %b) { ; CHECK-LABEL: vmax_vx_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -600,7 +600,7 @@ ; CHECK-LABEL: vmax_vi_nxv4i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 -3, i32 0 @@ -613,7 +613,7 @@ define @vmax_vv_nxv8i32( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, 
e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v12 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb @@ -624,7 +624,7 @@ define @vmax_vx_nxv8i32( %va, i32 signext %b) { ; CHECK-LABEL: vmax_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -638,7 +638,7 @@ ; CHECK-LABEL: vmax_vi_nxv8i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 -3, i32 0 @@ -651,7 +651,7 @@ define @vmax_vv_nxv16i32( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v16 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb @@ -662,7 +662,7 @@ define @vmax_vx_nxv16i32( %va, i32 signext %b) { ; CHECK-LABEL: vmax_vx_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -676,7 +676,7 @@ ; CHECK-LABEL: vmax_vi_nxv16i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 -3, i32 0 @@ -689,7 +689,7 @@ define @vmax_vv_nxv1i64( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb @@ -705,7 +705,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vmaxu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -713,7 +713,7 @@ ; ; RV64-LABEL: vmax_vx_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV64-NEXT: vmaxu.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -727,7 +727,7 @@ ; CHECK-LABEL: vmax_vi_nxv1i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 -3, i32 0 @@ -740,7 +740,7 @@ define @vmax_vv_nxv2i64( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v10 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb @@ -756,7 +756,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vmaxu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -764,7 +764,7 @@ ; ; RV64-LABEL: vmax_vx_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV64-NEXT: vmaxu.vx v8, v8, a0 ; 
RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -778,7 +778,7 @@ ; CHECK-LABEL: vmax_vi_nxv2i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 -3, i32 0 @@ -791,7 +791,7 @@ define @vmax_vv_nxv4i64( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v12 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb @@ -807,7 +807,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vmaxu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -815,7 +815,7 @@ ; ; RV64-LABEL: vmax_vx_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV64-NEXT: vmaxu.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -829,7 +829,7 @@ ; CHECK-LABEL: vmax_vi_nxv4i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 -3, i32 0 @@ -842,7 +842,7 @@ define @vmax_vv_nxv8i64( %va, %vb) { ; CHECK-LABEL: vmax_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmaxu.vv v8, v8, v16 ; CHECK-NEXT: ret %cmp = icmp ugt %va, %vb @@ -858,7 +858,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmaxu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -866,7 +866,7 @@ ; ; RV64-LABEL: vmax_vx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vmaxu.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -880,7 +880,7 @@ ; CHECK-LABEL: vmax_vi_nxv8i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; CHECK-NEXT: vmaxu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 -3, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmclr.ll b/llvm/test/CodeGen/RISCV/rvv/vmclr.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmclr.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmclr.ll @@ -9,7 +9,7 @@ define @intrinsic_vmclr_m_pseudo_nxv1i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv1i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmclr.m v0 ; CHECK-NEXT: ret entry: @@ -25,7 +25,7 @@ define @intrinsic_vmclr_m_pseudo_nxv2i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv2i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmclr.m v0 ; CHECK-NEXT: ret entry: @@ -41,7 +41,7 @@ define @intrinsic_vmclr_m_pseudo_nxv4i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv4i1: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmclr.m v0 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vmclr_m_pseudo_nxv8i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv8i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmclr.m v0 ; CHECK-NEXT: ret entry: @@ -73,7 +73,7 @@ define @intrinsic_vmclr_m_pseudo_nxv16i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv16i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmclr.m v0 ; CHECK-NEXT: ret entry: @@ -89,7 +89,7 @@ define @intrinsic_vmclr_m_pseudo_nxv32i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv32i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmclr.m v0 ; CHECK-NEXT: ret entry: @@ -105,7 +105,7 @@ define @intrinsic_vmclr_m_pseudo_nxv64i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv64i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmclr.m v0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmerge.ll b/llvm/test/CodeGen/RISCV/rvv/vmerge.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmerge.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmerge.ll @@ -13,7 +13,7 @@ define @intrinsic_vmerge_vvm_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -37,7 +37,7 @@ define @intrinsic_vmerge_vvm_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -61,7 +61,7 @@ define @intrinsic_vmerge_vvm_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -85,7 +85,7 @@ define @intrinsic_vmerge_vvm_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -109,7 +109,7 @@ define @intrinsic_vmerge_vvm_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0 ; CHECK-NEXT: ret entry: @@ -133,7 +133,7 @@ define @intrinsic_vmerge_vvm_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli 
zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0 ; CHECK-NEXT: ret entry: @@ -157,7 +157,7 @@ define @intrinsic_vmerge_vvm_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0 ; CHECK-NEXT: ret entry: @@ -181,7 +181,7 @@ define @intrinsic_vmerge_vvm_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -205,7 +205,7 @@ define @intrinsic_vmerge_vvm_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -229,7 +229,7 @@ define @intrinsic_vmerge_vvm_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -253,7 +253,7 @@ define @intrinsic_vmerge_vvm_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0 ; CHECK-NEXT: ret entry: @@ -277,7 +277,7 @@ define @intrinsic_vmerge_vvm_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0 ; CHECK-NEXT: ret entry: @@ -301,7 +301,7 @@ define @intrinsic_vmerge_vvm_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0 ; CHECK-NEXT: ret entry: @@ -325,7 +325,7 @@ define @intrinsic_vmerge_vvm_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -349,7 +349,7 @@ define @intrinsic_vmerge_vvm_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -373,7 +373,7 @@ define @intrinsic_vmerge_vvm_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0 ; CHECK-NEXT: ret entry: @@ -397,7 +397,7 @@ define @intrinsic_vmerge_vvm_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0 ; CHECK-NEXT: ret entry: @@ -421,7 +421,7 @@ define @intrinsic_vmerge_vvm_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0 ; CHECK-NEXT: ret entry: @@ -445,7 +445,7 @@ define @intrinsic_vmerge_vvm_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -469,7 +469,7 @@ define @intrinsic_vmerge_vvm_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0 ; CHECK-NEXT: ret entry: @@ -493,7 +493,7 @@ define @intrinsic_vmerge_vvm_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0 ; CHECK-NEXT: ret entry: @@ -517,7 +517,7 @@ define @intrinsic_vmerge_vvm_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0 ; CHECK-NEXT: ret entry: @@ -541,7 +541,7 @@ define @intrinsic_vmerge_vxm_nxv1i8_nxv1i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -565,7 +565,7 @@ define @intrinsic_vmerge_vxm_nxv2i8_nxv2i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -589,7 +589,7 @@ define @intrinsic_vmerge_vxm_nxv4i8_nxv4i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -613,7 +613,7 @@ define @intrinsic_vmerge_vxm_nxv8i8_nxv8i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -637,7 +637,7 @@ define @intrinsic_vmerge_vxm_nxv16i8_nxv16i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -661,7 +661,7 @@ define @intrinsic_vmerge_vxm_nxv32i8_nxv32i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -685,7 +685,7 @@ define @intrinsic_vmerge_vxm_nxv64i8_nxv64i8_i8( %0, i8 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -709,7 +709,7 @@ define @intrinsic_vmerge_vxm_nxv1i16_nxv1i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -733,7 +733,7 @@ define @intrinsic_vmerge_vxm_nxv2i16_nxv2i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -757,7 +757,7 @@ define @intrinsic_vmerge_vxm_nxv4i16_nxv4i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -781,7 +781,7 @@ define @intrinsic_vmerge_vxm_nxv8i16_nxv8i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -805,7 +805,7 @@ define @intrinsic_vmerge_vxm_nxv16i16_nxv16i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -829,7 +829,7 @@ define @intrinsic_vmerge_vxm_nxv32i16_nxv32i16_i16( %0, i16 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -853,7 +853,7 @@ define @intrinsic_vmerge_vxm_nxv1i32_nxv1i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, 
mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -877,7 +877,7 @@ define @intrinsic_vmerge_vxm_nxv2i32_nxv2i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -901,7 +901,7 @@ define @intrinsic_vmerge_vxm_nxv4i32_nxv4i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -925,7 +925,7 @@ define @intrinsic_vmerge_vxm_nxv8i32_nxv8i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -949,7 +949,7 @@ define @intrinsic_vmerge_vxm_nxv16i32_nxv16i32_i32( %0, i32 %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vxm_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -1042,7 +1042,7 @@ define @intrinsic_vmerge_vim_nxv1i8_nxv1i8_i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vim_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1059,7 +1059,7 @@ define @intrinsic_vmerge_vim_nxv2i8_nxv2i8_i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vim_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1076,7 +1076,7 @@ define @intrinsic_vmerge_vim_nxv4i8_nxv4i8_i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vim_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1093,7 +1093,7 @@ define @intrinsic_vmerge_vim_nxv8i8_nxv8i8_i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vim_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1110,7 +1110,7 @@ define @intrinsic_vmerge_vim_nxv16i8_nxv16i8_i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vim_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1127,7 +1127,7 @@ define @intrinsic_vmerge_vim_nxv32i8_nxv32i8_i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vim_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 
9, v0 ; CHECK-NEXT: ret entry: @@ -1144,7 +1144,7 @@ define @intrinsic_vmerge_vim_nxv64i8_nxv64i8_i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vim_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1161,7 +1161,7 @@ define @intrinsic_vmerge_vim_nxv1i16_nxv1i16_i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vim_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1178,7 +1178,7 @@ define @intrinsic_vmerge_vim_nxv2i16_nxv2i16_i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vim_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1195,7 +1195,7 @@ define @intrinsic_vmerge_vim_nxv4i16_nxv4i16_i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vim_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1212,7 +1212,7 @@ define @intrinsic_vmerge_vim_nxv8i16_nxv8i16_i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vim_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1229,7 +1229,7 @@ define @intrinsic_vmerge_vim_nxv16i16_nxv16i16_i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vim_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1246,7 +1246,7 @@ define @intrinsic_vmerge_vim_nxv32i16_nxv32i16_i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vim_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1263,7 +1263,7 @@ define @intrinsic_vmerge_vim_nxv1i32_nxv1i32_i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vim_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1280,7 +1280,7 @@ define @intrinsic_vmerge_vim_nxv2i32_nxv2i32_i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vim_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1297,7 +1297,7 @@ define @intrinsic_vmerge_vim_nxv4i32_nxv4i32_i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vim_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1314,7 +1314,7 @@ define 
@intrinsic_vmerge_vim_nxv8i32_nxv8i32_i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vim_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1331,7 +1331,7 @@ define @intrinsic_vmerge_vim_nxv16i32_nxv16i32_i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vim_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1348,7 +1348,7 @@ define @intrinsic_vmerge_vim_nxv1i64_nxv1i64_i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vim_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1365,7 +1365,7 @@ define @intrinsic_vmerge_vim_nxv2i64_nxv2i64_i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vim_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1382,7 +1382,7 @@ define @intrinsic_vmerge_vim_nxv4i64_nxv4i64_i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vim_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1399,7 +1399,7 @@ define @intrinsic_vmerge_vim_nxv8i64_nxv8i64_i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vim_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 9, v0 ; CHECK-NEXT: ret entry: @@ -1423,7 +1423,7 @@ define @intrinsic_vmerge_vvm_nxv1f16_nxv1f16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1f16_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -1447,7 +1447,7 @@ define @intrinsic_vmerge_vvm_nxv2f16_nxv2f16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2f16_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vmerge_vvm_nxv4f16_nxv4f16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4f16_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -1495,7 +1495,7 @@ define @intrinsic_vmerge_vvm_nxv8f16_nxv8f16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8f16_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0 ; CHECK-NEXT: ret entry: @@ -1519,7 +1519,7 @@ define 
@intrinsic_vmerge_vvm_nxv16f16_nxv16f16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16f16_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0 ; CHECK-NEXT: ret entry: @@ -1543,7 +1543,7 @@ define @intrinsic_vmerge_vvm_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0 ; CHECK-NEXT: ret entry: @@ -1567,7 +1567,7 @@ define @intrinsic_vmerge_vvm_nxv1f32_nxv1f32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1f32_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -1591,7 +1591,7 @@ define @intrinsic_vmerge_vvm_nxv2f32_nxv2f32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2f32_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -1615,7 +1615,7 @@ define @intrinsic_vmerge_vvm_nxv4f32_nxv4f32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4f32_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0 ; CHECK-NEXT: ret entry: @@ -1639,7 +1639,7 @@ define @intrinsic_vmerge_vvm_nxv8f32_nxv8f32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8f32_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0 ; CHECK-NEXT: ret entry: @@ -1663,7 +1663,7 @@ define @intrinsic_vmerge_vvm_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0 ; CHECK-NEXT: ret entry: @@ -1687,7 +1687,7 @@ define @intrinsic_vmerge_vvm_nxv1f64_nxv1f64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv1f64_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -1711,7 +1711,7 @@ define @intrinsic_vmerge_vvm_nxv2f64_nxv2f64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv2f64_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0 ; CHECK-NEXT: ret entry: @@ -1735,7 +1735,7 @@ define @intrinsic_vmerge_vvm_nxv4f64_nxv4f64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv4f64_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: 
vmerge.vvm v8, v8, v12, v0 ; CHECK-NEXT: ret entry: @@ -1759,7 +1759,7 @@ define @intrinsic_vmerge_vvm_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vmerge_vvm_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfeq.ll b/llvm/test/CodeGen/RISCV/rvv/vmfeq.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfeq.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfeq.ll @@ -11,7 +11,7 @@ define @intrinsic_vmfeq_vv_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmfeq.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -63,7 +63,7 @@ define @intrinsic_vmfeq_vv_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmfeq.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -115,7 +115,7 @@ define @intrinsic_vmfeq_vv_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmfeq.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -167,7 +167,7 @@ define @intrinsic_vmfeq_vv_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmfeq.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -219,7 +219,7 @@ define @intrinsic_vmfeq_vv_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmfeq.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -271,7 +271,7 @@ define @intrinsic_vmfeq_vv_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmfeq.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -323,7 +323,7 @@ define @intrinsic_vmfeq_vv_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmfeq.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -375,7 +375,7 @@ define @intrinsic_vmfeq_vv_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmfeq.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -427,7 +427,7 @@ define @intrinsic_vmfeq_vv_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmfeq.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ 
-479,7 +479,7 @@ define @intrinsic_vmfeq_vv_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmfeq.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -531,7 +531,7 @@ define @intrinsic_vmfeq_vv_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmfeq.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -583,7 +583,7 @@ define @intrinsic_vmfeq_vv_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmfeq.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -635,7 +635,7 @@ define @intrinsic_vmfeq_vf_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -682,7 +682,7 @@ define @intrinsic_vmfeq_vf_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -729,7 +729,7 @@ define @intrinsic_vmfeq_vf_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -776,7 +776,7 @@ define @intrinsic_vmfeq_vf_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -823,7 +823,7 @@ define @intrinsic_vmfeq_vf_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -870,7 +870,7 @@ define @intrinsic_vmfeq_vf_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -917,7 +917,7 @@ define @intrinsic_vmfeq_vf_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -964,7 +964,7 @@ define @intrinsic_vmfeq_vf_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; 
CHECK-NEXT: vmfeq.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1011,7 +1011,7 @@ define @intrinsic_vmfeq_vf_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1058,7 +1058,7 @@ define @intrinsic_vmfeq_vf_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1105,7 +1105,7 @@ define @intrinsic_vmfeq_vf_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1152,7 +1152,7 @@ define @intrinsic_vmfeq_vf_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfeq_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmfeq.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfge.ll b/llvm/test/CodeGen/RISCV/rvv/vmfge.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfge.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfge.ll @@ -11,7 +11,7 @@ define @intrinsic_vmfge_vv_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmfle.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -63,7 +63,7 @@ define @intrinsic_vmfge_vv_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmfle.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -115,7 +115,7 @@ define @intrinsic_vmfge_vv_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmfle.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -167,7 +167,7 @@ define @intrinsic_vmfge_vv_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmfle.vv v0, v10, v8 ; CHECK-NEXT: ret entry: @@ -219,7 +219,7 @@ define @intrinsic_vmfge_vv_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmfle.vv v0, v12, v8 ; CHECK-NEXT: ret entry: @@ -271,7 +271,7 @@ define @intrinsic_vmfge_vv_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmfle.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -323,7 +323,7 @@ define 
@intrinsic_vmfge_vv_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmfle.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -375,7 +375,7 @@ define @intrinsic_vmfge_vv_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmfle.vv v0, v10, v8 ; CHECK-NEXT: ret entry: @@ -427,7 +427,7 @@ define @intrinsic_vmfge_vv_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmfle.vv v0, v12, v8 ; CHECK-NEXT: ret entry: @@ -479,7 +479,7 @@ define @intrinsic_vmfge_vv_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmfle.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -531,7 +531,7 @@ define @intrinsic_vmfge_vv_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmfle.vv v0, v10, v8 ; CHECK-NEXT: ret entry: @@ -583,7 +583,7 @@ define @intrinsic_vmfge_vv_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmfle.vv v0, v12, v8 ; CHECK-NEXT: ret entry: @@ -635,7 +635,7 @@ define @intrinsic_vmfge_vf_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmfge.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -682,7 +682,7 @@ define @intrinsic_vmfge_vf_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmfge.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -729,7 +729,7 @@ define @intrinsic_vmfge_vf_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmfge.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -776,7 +776,7 @@ define @intrinsic_vmfge_vf_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmfge.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -823,7 +823,7 @@ define @intrinsic_vmfge_vf_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmfge.vf v0, v8, 
fa0 ; CHECK-NEXT: ret entry: @@ -870,7 +870,7 @@ define @intrinsic_vmfge_vf_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmfge.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -917,7 +917,7 @@ define @intrinsic_vmfge_vf_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmfge.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -964,7 +964,7 @@ define @intrinsic_vmfge_vf_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmfge.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1011,7 +1011,7 @@ define @intrinsic_vmfge_vf_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmfge.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1058,7 +1058,7 @@ define @intrinsic_vmfge_vf_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmfge.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1105,7 +1105,7 @@ define @intrinsic_vmfge_vf_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmfge.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1152,7 +1152,7 @@ define @intrinsic_vmfge_vf_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfge_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmfge.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfgt.ll b/llvm/test/CodeGen/RISCV/rvv/vmfgt.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfgt.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfgt.ll @@ -11,7 +11,7 @@ define @intrinsic_vmfgt_vv_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmflt.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -63,7 +63,7 @@ define @intrinsic_vmfgt_vv_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmflt.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -115,7 +115,7 @@ define @intrinsic_vmfgt_vv_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmflt.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -167,7 +167,7 @@ define @intrinsic_vmfgt_vv_nxv8f16_nxv8f16( 
%0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmflt.vv v0, v10, v8 ; CHECK-NEXT: ret entry: @@ -219,7 +219,7 @@ define @intrinsic_vmfgt_vv_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmflt.vv v0, v12, v8 ; CHECK-NEXT: ret entry: @@ -271,7 +271,7 @@ define @intrinsic_vmfgt_vv_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmflt.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -323,7 +323,7 @@ define @intrinsic_vmfgt_vv_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmflt.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -375,7 +375,7 @@ define @intrinsic_vmfgt_vv_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmflt.vv v0, v10, v8 ; CHECK-NEXT: ret entry: @@ -427,7 +427,7 @@ define @intrinsic_vmfgt_vv_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmflt.vv v0, v12, v8 ; CHECK-NEXT: ret entry: @@ -479,7 +479,7 @@ define @intrinsic_vmfgt_vv_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmflt.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -531,7 +531,7 @@ define @intrinsic_vmfgt_vv_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmflt.vv v0, v10, v8 ; CHECK-NEXT: ret entry: @@ -583,7 +583,7 @@ define @intrinsic_vmfgt_vv_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmflt.vv v0, v12, v8 ; CHECK-NEXT: ret entry: @@ -635,7 +635,7 @@ define @intrinsic_vmfgt_vf_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -682,7 +682,7 @@ define @intrinsic_vmfgt_vf_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 ; CHECK-NEXT: ret 
entry: @@ -729,7 +729,7 @@ define @intrinsic_vmfgt_vf_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -776,7 +776,7 @@ define @intrinsic_vmfgt_vf_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -823,7 +823,7 @@ define @intrinsic_vmfgt_vf_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -870,7 +870,7 @@ define @intrinsic_vmfgt_vf_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -917,7 +917,7 @@ define @intrinsic_vmfgt_vf_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -964,7 +964,7 @@ define @intrinsic_vmfgt_vf_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1011,7 +1011,7 @@ define @intrinsic_vmfgt_vf_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1058,7 +1058,7 @@ define @intrinsic_vmfgt_vf_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1105,7 +1105,7 @@ define @intrinsic_vmfgt_vf_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1152,7 +1152,7 @@ define @intrinsic_vmfgt_vf_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfgt_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmfgt.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfle.ll b/llvm/test/CodeGen/RISCV/rvv/vmfle.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfle.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfle.ll @@ -11,7 +11,7 @@ define @intrinsic_vmfle_vv_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { 
; CHECK-LABEL: intrinsic_vmfle_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmfle.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -63,7 +63,7 @@ define @intrinsic_vmfle_vv_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmfle.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -115,7 +115,7 @@ define @intrinsic_vmfle_vv_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmfle.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -167,7 +167,7 @@ define @intrinsic_vmfle_vv_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmfle.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -219,7 +219,7 @@ define @intrinsic_vmfle_vv_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmfle.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -271,7 +271,7 @@ define @intrinsic_vmfle_vv_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmfle.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -323,7 +323,7 @@ define @intrinsic_vmfle_vv_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmfle.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -375,7 +375,7 @@ define @intrinsic_vmfle_vv_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmfle.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -427,7 +427,7 @@ define @intrinsic_vmfle_vv_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmfle.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -479,7 +479,7 @@ define @intrinsic_vmfle_vv_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmfle.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -531,7 +531,7 @@ define @intrinsic_vmfle_vv_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmfle.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -583,7 +583,7 @@ define 
@intrinsic_vmfle_vv_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmfle.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -635,7 +635,7 @@ define @intrinsic_vmfle_vf_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmfle.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -682,7 +682,7 @@ define @intrinsic_vmfle_vf_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmfle.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -729,7 +729,7 @@ define @intrinsic_vmfle_vf_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmfle.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -776,7 +776,7 @@ define @intrinsic_vmfle_vf_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmfle.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -823,7 +823,7 @@ define @intrinsic_vmfle_vf_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmfle.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -870,7 +870,7 @@ define @intrinsic_vmfle_vf_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmfle.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -917,7 +917,7 @@ define @intrinsic_vmfle_vf_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmfle.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -964,7 +964,7 @@ define @intrinsic_vmfle_vf_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmfle.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1011,7 +1011,7 @@ define @intrinsic_vmfle_vf_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmfle.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1058,7 +1058,7 @@ define @intrinsic_vmfle_vf_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmfle.vf v0, v8, fa0 
; CHECK-NEXT: ret entry: @@ -1105,7 +1105,7 @@ define @intrinsic_vmfle_vf_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmfle.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1152,7 +1152,7 @@ define @intrinsic_vmfle_vf_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfle_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmfle.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmflt.ll b/llvm/test/CodeGen/RISCV/rvv/vmflt.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmflt.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmflt.ll @@ -11,7 +11,7 @@ define @intrinsic_vmflt_vv_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmflt.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -63,7 +63,7 @@ define @intrinsic_vmflt_vv_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmflt.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -115,7 +115,7 @@ define @intrinsic_vmflt_vv_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmflt.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -167,7 +167,7 @@ define @intrinsic_vmflt_vv_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmflt.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -219,7 +219,7 @@ define @intrinsic_vmflt_vv_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmflt.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -271,7 +271,7 @@ define @intrinsic_vmflt_vv_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmflt.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -323,7 +323,7 @@ define @intrinsic_vmflt_vv_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmflt.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -375,7 +375,7 @@ define @intrinsic_vmflt_vv_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmflt.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -427,7 +427,7 @@ define 
@intrinsic_vmflt_vv_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmflt.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -479,7 +479,7 @@ define @intrinsic_vmflt_vv_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmflt.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -531,7 +531,7 @@ define @intrinsic_vmflt_vv_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmflt.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -583,7 +583,7 @@ define @intrinsic_vmflt_vv_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmflt.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -635,7 +635,7 @@ define @intrinsic_vmflt_vf_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmflt.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -682,7 +682,7 @@ define @intrinsic_vmflt_vf_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmflt.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -729,7 +729,7 @@ define @intrinsic_vmflt_vf_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmflt.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -776,7 +776,7 @@ define @intrinsic_vmflt_vf_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmflt.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -823,7 +823,7 @@ define @intrinsic_vmflt_vf_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmflt.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -870,7 +870,7 @@ define @intrinsic_vmflt_vf_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmflt.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -917,7 +917,7 @@ define @intrinsic_vmflt_vf_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmflt.vf v0, v8, fa0 
; CHECK-NEXT: ret entry: @@ -964,7 +964,7 @@ define @intrinsic_vmflt_vf_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmflt.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1011,7 +1011,7 @@ define @intrinsic_vmflt_vf_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmflt.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1058,7 +1058,7 @@ define @intrinsic_vmflt_vf_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmflt.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1105,7 +1105,7 @@ define @intrinsic_vmflt_vf_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmflt.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1152,7 +1152,7 @@ define @intrinsic_vmflt_vf_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmflt_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmflt.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfne.ll b/llvm/test/CodeGen/RISCV/rvv/vmfne.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfne.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfne.ll @@ -11,7 +11,7 @@ define @intrinsic_vmfne_vv_nxv1f16_nxv1f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -63,7 +63,7 @@ define @intrinsic_vmfne_vv_nxv2f16_nxv2f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -115,7 +115,7 @@ define @intrinsic_vmfne_vv_nxv4f16_nxv4f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -167,7 +167,7 @@ define @intrinsic_vmfne_vv_nxv8f16_nxv8f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -219,7 +219,7 @@ define @intrinsic_vmfne_vv_nxv16f16_nxv16f16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -271,7 +271,7 @@ define 
@intrinsic_vmfne_vv_nxv1f32_nxv1f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -323,7 +323,7 @@ define @intrinsic_vmfne_vv_nxv2f32_nxv2f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -375,7 +375,7 @@ define @intrinsic_vmfne_vv_nxv4f32_nxv4f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -427,7 +427,7 @@ define @intrinsic_vmfne_vv_nxv8f32_nxv8f32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -479,7 +479,7 @@ define @intrinsic_vmfne_vv_nxv1f64_nxv1f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -531,7 +531,7 @@ define @intrinsic_vmfne_vv_nxv2f64_nxv2f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -583,7 +583,7 @@ define @intrinsic_vmfne_vv_nxv4f64_nxv4f64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmfne.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -635,7 +635,7 @@ define @intrinsic_vmfne_vf_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv1f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmfne.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -682,7 +682,7 @@ define @intrinsic_vmfne_vf_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv2f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmfne.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -729,7 +729,7 @@ define @intrinsic_vmfne_vf_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv4f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmfne.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -776,7 +776,7 @@ define @intrinsic_vmfne_vf_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv8f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmfne.vf v0, v8, 
fa0 ; CHECK-NEXT: ret entry: @@ -823,7 +823,7 @@ define @intrinsic_vmfne_vf_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv16f16_f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmfne.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -870,7 +870,7 @@ define @intrinsic_vmfne_vf_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv1f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmfne.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -917,7 +917,7 @@ define @intrinsic_vmfne_vf_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv2f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmfne.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -964,7 +964,7 @@ define @intrinsic_vmfne_vf_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv4f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmfne.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1011,7 +1011,7 @@ define @intrinsic_vmfne_vf_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv8f32_f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmfne.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1058,7 +1058,7 @@ define @intrinsic_vmfne_vf_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv1f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmfne.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1105,7 +1105,7 @@ define @intrinsic_vmfne_vf_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv2f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmfne.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: @@ -1152,7 +1152,7 @@ define @intrinsic_vmfne_vf_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmfne_vf_nxv4f64_f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmfne.vf v0, v8, fa0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vmin_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define 
@intrinsic_vmin_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vmin_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vmin_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vmin_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vmin_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vmin_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vmin_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vmin_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vmin_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vmin_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vmin_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vmin_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vmin_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vmin_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vmin_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vmin_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vmin_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vmin_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vmin_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vmin_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vmin_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ 
-1095,7 +1095,7 @@ define @intrinsic_vmin_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vmin_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vmin_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vmin_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define @intrinsic_vmin_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vmin_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vmin_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vmin_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vmin_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define @intrinsic_vmin_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vmin_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, 
e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vmin_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vmin_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vmin_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ define @intrinsic_vmin_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ define @intrinsic_vmin_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vmin_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1898,7 +1898,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vmin.vv v8, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1957,7 +1957,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmin.vv v8, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -2016,7 +2016,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmin.vv v8, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -2075,7 +2075,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmin.vv v8, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vmin_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vmin_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vmin_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vmin_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vmin_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vmin_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vmin_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vmin_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vmin_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vmin_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmin_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vmin_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vmin_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vmin_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vmin_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vmin_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vmin_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vmin_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vmin_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vmin_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vmin_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: 
vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vmin_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vmin_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1095,7 +1095,7 @@ define @intrinsic_vmin_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vmin_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vmin_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vmin_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define @intrinsic_vmin_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vmin_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vmin_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vmin_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vmin_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmin_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define @intrinsic_vmin_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vmin_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vmin_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vmin_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vmin_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ define @intrinsic_vmin_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ define @intrinsic_vmin_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vmin_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1894,7 +1894,7 @@ define @intrinsic_vmin_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1941,7 +1941,7 @@ define @intrinsic_vmin_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; 
CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1988,7 +1988,7 @@ define @intrinsic_vmin_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2035,7 +2035,7 @@ define @intrinsic_vmin_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmin_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmin-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmin-sdnode.ll @@ -5,7 +5,7 @@ define @vmin_vv_nxv1i8( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb @@ -16,7 +16,7 @@ define @vmin_vx_nxv1i8( %va, i8 signext %b) { ; CHECK-LABEL: vmin_vx_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -30,7 +30,7 @@ ; CHECK-LABEL: vmin_vi_nxv1i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 -3, i32 0 @@ -43,7 +43,7 @@ define @vmin_vv_nxv2i8( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb @@ -54,7 +54,7 @@ define @vmin_vx_nxv2i8( %va, i8 signext %b) { ; CHECK-LABEL: vmin_vx_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -68,7 +68,7 @@ ; CHECK-LABEL: vmin_vi_nxv2i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 -3, i32 0 @@ -81,7 +81,7 @@ define @vmin_vv_nxv4i8( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb @@ -92,7 +92,7 @@ define @vmin_vx_nxv4i8( %va, i8 signext %b) { ; CHECK-LABEL: vmin_vx_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -106,7 +106,7 @@ ; CHECK-LABEL: vmin_vi_nxv4i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 -3, i32 0 @@ -119,7 +119,7 @@ 
define @vmin_vv_nxv8i8( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb @@ -130,7 +130,7 @@ define @vmin_vx_nxv8i8( %va, i8 signext %b) { ; CHECK-LABEL: vmin_vx_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -144,7 +144,7 @@ ; CHECK-LABEL: vmin_vi_nxv8i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 -3, i32 0 @@ -157,7 +157,7 @@ define @vmin_vv_nxv16i8( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v10 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb @@ -168,7 +168,7 @@ define @vmin_vx_nxv16i8( %va, i8 signext %b) { ; CHECK-LABEL: vmin_vx_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -182,7 +182,7 @@ ; CHECK-LABEL: vmin_vi_nxv16i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 -3, i32 0 @@ -195,7 +195,7 @@ define @vmin_vv_nxv32i8( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v12 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb @@ -206,7 +206,7 @@ define @vmin_vx_nxv32i8( %va, i8 signext %b) { ; CHECK-LABEL: vmin_vx_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -220,7 +220,7 @@ ; CHECK-LABEL: vmin_vi_nxv32i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 -3, i32 0 @@ -233,7 +233,7 @@ define @vmin_vv_nxv64i8( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v16 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb @@ -244,7 +244,7 @@ define @vmin_vx_nxv64i8( %va, i8 signext %b) { ; CHECK-LABEL: vmin_vx_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -258,7 +258,7 @@ ; CHECK-LABEL: vmin_vi_nxv64i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 -3, i32 0 @@ -271,7 +271,7 @@ define @vmin_vv_nxv1i16( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv1i16: 
; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb @@ -282,7 +282,7 @@ define @vmin_vx_nxv1i16( %va, i16 signext %b) { ; CHECK-LABEL: vmin_vx_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -296,7 +296,7 @@ ; CHECK-LABEL: vmin_vi_nxv1i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 -3, i32 0 @@ -309,7 +309,7 @@ define @vmin_vv_nxv2i16( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb @@ -320,7 +320,7 @@ define @vmin_vx_nxv2i16( %va, i16 signext %b) { ; CHECK-LABEL: vmin_vx_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -334,7 +334,7 @@ ; CHECK-LABEL: vmin_vi_nxv2i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 -3, i32 0 @@ -347,7 +347,7 @@ define @vmin_vv_nxv4i16( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb @@ -358,7 +358,7 @@ define @vmin_vx_nxv4i16( %va, i16 signext %b) { ; CHECK-LABEL: vmin_vx_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -372,7 +372,7 @@ ; CHECK-LABEL: vmin_vi_nxv4i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 -3, i32 0 @@ -385,7 +385,7 @@ define @vmin_vv_nxv8i16( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v10 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb @@ -396,7 +396,7 @@ define @vmin_vx_nxv8i16( %va, i16 signext %b) { ; CHECK-LABEL: vmin_vx_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -410,7 +410,7 @@ ; CHECK-LABEL: vmin_vi_nxv8i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 -3, i32 0 @@ -423,7 +423,7 @@ define @vmin_vv_nxv16i16( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv16i16: ; CHECK: # 
%bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v12 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb @@ -434,7 +434,7 @@ define @vmin_vx_nxv16i16( %va, i16 signext %b) { ; CHECK-LABEL: vmin_vx_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -448,7 +448,7 @@ ; CHECK-LABEL: vmin_vi_nxv16i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 -3, i32 0 @@ -461,7 +461,7 @@ define @vmin_vv_nxv32i16( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v16 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb @@ -472,7 +472,7 @@ define @vmin_vx_nxv32i16( %va, i16 signext %b) { ; CHECK-LABEL: vmin_vx_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -486,7 +486,7 @@ ; CHECK-LABEL: vmin_vi_nxv32i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 -3, i32 0 @@ -499,7 +499,7 @@ define @vmin_vv_nxv1i32( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb @@ -510,7 +510,7 @@ define @vmin_vx_nxv1i32( %va, i32 signext %b) { ; CHECK-LABEL: vmin_vx_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -524,7 +524,7 @@ ; CHECK-LABEL: vmin_vi_nxv1i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 -3, i32 0 @@ -537,7 +537,7 @@ define @vmin_vv_nxv2i32( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb @@ -548,7 +548,7 @@ define @vmin_vx_nxv2i32( %va, i32 signext %b) { ; CHECK-LABEL: vmin_vx_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -562,7 +562,7 @@ ; CHECK-LABEL: vmin_vi_nxv2i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 -3, i32 0 @@ -575,7 +575,7 @@ define @vmin_vv_nxv4i32( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv4i32: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v10 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb @@ -586,7 +586,7 @@ define @vmin_vx_nxv4i32( %va, i32 signext %b) { ; CHECK-LABEL: vmin_vx_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -600,7 +600,7 @@ ; CHECK-LABEL: vmin_vi_nxv4i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 -3, i32 0 @@ -613,7 +613,7 @@ define @vmin_vv_nxv8i32( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v12 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb @@ -624,7 +624,7 @@ define @vmin_vx_nxv8i32( %va, i32 signext %b) { ; CHECK-LABEL: vmin_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -638,7 +638,7 @@ ; CHECK-LABEL: vmin_vi_nxv8i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 -3, i32 0 @@ -651,7 +651,7 @@ define @vmin_vv_nxv16i32( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v16 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb @@ -662,7 +662,7 @@ define @vmin_vx_nxv16i32( %va, i32 signext %b) { ; CHECK-LABEL: vmin_vx_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -676,7 +676,7 @@ ; CHECK-LABEL: vmin_vi_nxv16i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 -3, i32 0 @@ -689,7 +689,7 @@ define @vmin_vv_nxv1i64( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb @@ -705,7 +705,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vmin.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -713,7 +713,7 @@ ; ; RV64-LABEL: vmin_vx_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV64-NEXT: vmin.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -727,7 +727,7 @@ ; CHECK-LABEL: vmin_vi_nxv1i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, 
e64, m1, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 -3, i32 0 @@ -740,7 +740,7 @@ define @vmin_vv_nxv2i64( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v10 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb @@ -756,7 +756,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vmin.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -764,7 +764,7 @@ ; ; RV64-LABEL: vmin_vx_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV64-NEXT: vmin.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -778,7 +778,7 @@ ; CHECK-LABEL: vmin_vi_nxv2i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 -3, i32 0 @@ -791,7 +791,7 @@ define @vmin_vv_nxv4i64( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v12 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb @@ -807,7 +807,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vmin.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -815,7 +815,7 @@ ; ; RV64-LABEL: vmin_vx_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV64-NEXT: vmin.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -829,7 +829,7 @@ ; CHECK-LABEL: vmin_vi_nxv4i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 -3, i32 0 @@ -842,7 +842,7 @@ define @vmin_vv_nxv8i64( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmin.vv v8, v8, v16 ; CHECK-NEXT: ret %cmp = icmp slt %va, %vb @@ -858,7 +858,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmin.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -866,7 +866,7 @@ ; ; RV64-LABEL: vmin_vx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vmin.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -880,7 +880,7 @@ ; CHECK-LABEL: vmin_vi_nxv8i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; CHECK-NEXT: vmin.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 -3, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll 
b/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vminu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vminu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vminu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vminu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vminu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vminu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vminu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vminu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vminu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ 
define @intrinsic_vminu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vminu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vminu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vminu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vminu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vminu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vminu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vminu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vminu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vminu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vminu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vminu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vminu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vminu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1095,7 +1095,7 @@ define @intrinsic_vminu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vminu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vminu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vminu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define @intrinsic_vminu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vminu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vminu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vminu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: 
vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vminu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define @intrinsic_vminu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vminu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vminu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vminu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vminu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ define @intrinsic_vminu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ define @intrinsic_vminu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vminu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1898,7 +1898,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vminu.vv v8, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1957,7 +1957,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli 
zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vminu.vv v8, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -2016,7 +2016,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vminu.vv v8, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -2075,7 +2075,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vminu.vv v8, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vminu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vminu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vminu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vminu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vminu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vminu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vminu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vminu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vminu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vminu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vminu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vminu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vminu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vminu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vminu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vminu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vminu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vminu.vv 
v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vminu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vminu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vminu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vminu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vminu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1095,7 +1095,7 @@ define @intrinsic_vminu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vminu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vminu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vminu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define @intrinsic_vminu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vminu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vminu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vminu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vminu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vminu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define @intrinsic_vminu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vminu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vminu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vminu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vminu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ define @intrinsic_vminu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ define @intrinsic_vminu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli 
zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vminu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1894,7 +1894,7 @@ define @intrinsic_vminu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1941,7 +1941,7 @@ define @intrinsic_vminu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1988,7 +1988,7 @@ define @intrinsic_vminu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2035,7 +2035,7 @@ define @intrinsic_vminu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vminu_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vminu-sdnode.ll @@ -5,7 +5,7 @@ define @vmin_vv_nxv1i8( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb @@ -16,7 +16,7 @@ define @vmin_vx_nxv1i8( %va, i8 signext %b) { ; CHECK-LABEL: vmin_vx_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -30,7 +30,7 @@ ; CHECK-LABEL: vmin_vi_nxv1i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 -3, i32 0 @@ -43,7 +43,7 @@ define @vmin_vv_nxv2i8( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb @@ -54,7 +54,7 @@ define @vmin_vx_nxv2i8( %va, i8 signext %b) { ; CHECK-LABEL: vmin_vx_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -68,7 +68,7 @@ ; CHECK-LABEL: vmin_vi_nxv2i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: 
vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 -3, i32 0 @@ -81,7 +81,7 @@ define @vmin_vv_nxv4i8( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb @@ -92,7 +92,7 @@ define @vmin_vx_nxv4i8( %va, i8 signext %b) { ; CHECK-LABEL: vmin_vx_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -106,7 +106,7 @@ ; CHECK-LABEL: vmin_vi_nxv4i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 -3, i32 0 @@ -119,7 +119,7 @@ define @vmin_vv_nxv8i8( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb @@ -130,7 +130,7 @@ define @vmin_vx_nxv8i8( %va, i8 signext %b) { ; CHECK-LABEL: vmin_vx_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -144,7 +144,7 @@ ; CHECK-LABEL: vmin_vi_nxv8i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 -3, i32 0 @@ -157,7 +157,7 @@ define @vmin_vv_nxv16i8( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v10 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb @@ -168,7 +168,7 @@ define @vmin_vx_nxv16i8( %va, i8 signext %b) { ; CHECK-LABEL: vmin_vx_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -182,7 +182,7 @@ ; CHECK-LABEL: vmin_vi_nxv16i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 -3, i32 0 @@ -195,7 +195,7 @@ define @vmin_vv_nxv32i8( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v12 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb @@ -206,7 +206,7 @@ define @vmin_vx_nxv32i8( %va, i8 signext %b) { ; CHECK-LABEL: vmin_vx_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -220,7 +220,7 @@ ; CHECK-LABEL: vmin_vi_nxv32i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli 
a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 -3, i32 0 @@ -233,7 +233,7 @@ define @vmin_vv_nxv64i8( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v16 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb @@ -244,7 +244,7 @@ define @vmin_vx_nxv64i8( %va, i8 signext %b) { ; CHECK-LABEL: vmin_vx_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -258,7 +258,7 @@ ; CHECK-LABEL: vmin_vi_nxv64i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 -3, i32 0 @@ -271,7 +271,7 @@ define @vmin_vv_nxv1i16( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb @@ -282,7 +282,7 @@ define @vmin_vx_nxv1i16( %va, i16 signext %b) { ; CHECK-LABEL: vmin_vx_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -296,7 +296,7 @@ ; CHECK-LABEL: vmin_vi_nxv1i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 -3, i32 0 @@ -309,7 +309,7 @@ define @vmin_vv_nxv2i16( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb @@ -320,7 +320,7 @@ define @vmin_vx_nxv2i16( %va, i16 signext %b) { ; CHECK-LABEL: vmin_vx_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -334,7 +334,7 @@ ; CHECK-LABEL: vmin_vi_nxv2i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 -3, i32 0 @@ -347,7 +347,7 @@ define @vmin_vv_nxv4i16( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb @@ -358,7 +358,7 @@ define @vmin_vx_nxv4i16( %va, i16 signext %b) { ; CHECK-LABEL: vmin_vx_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -372,7 +372,7 @@ ; CHECK-LABEL: vmin_vi_nxv4i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, 
m1, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 -3, i32 0 @@ -385,7 +385,7 @@ define @vmin_vv_nxv8i16( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v10 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb @@ -396,7 +396,7 @@ define @vmin_vx_nxv8i16( %va, i16 signext %b) { ; CHECK-LABEL: vmin_vx_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -410,7 +410,7 @@ ; CHECK-LABEL: vmin_vi_nxv8i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 -3, i32 0 @@ -423,7 +423,7 @@ define @vmin_vv_nxv16i16( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v12 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb @@ -434,7 +434,7 @@ define @vmin_vx_nxv16i16( %va, i16 signext %b) { ; CHECK-LABEL: vmin_vx_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -448,7 +448,7 @@ ; CHECK-LABEL: vmin_vi_nxv16i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 -3, i32 0 @@ -461,7 +461,7 @@ define @vmin_vv_nxv32i16( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v16 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb @@ -472,7 +472,7 @@ define @vmin_vx_nxv32i16( %va, i16 signext %b) { ; CHECK-LABEL: vmin_vx_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -486,7 +486,7 @@ ; CHECK-LABEL: vmin_vi_nxv32i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 -3, i32 0 @@ -499,7 +499,7 @@ define @vmin_vv_nxv1i32( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb @@ -510,7 +510,7 @@ define @vmin_vx_nxv1i32( %va, i32 signext %b) { ; CHECK-LABEL: vmin_vx_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -524,7 +524,7 @@ ; CHECK-LABEL: vmin_vi_nxv1i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, 
mf2, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 -3, i32 0 @@ -537,7 +537,7 @@ define @vmin_vv_nxv2i32( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb @@ -548,7 +548,7 @@ define @vmin_vx_nxv2i32( %va, i32 signext %b) { ; CHECK-LABEL: vmin_vx_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -562,7 +562,7 @@ ; CHECK-LABEL: vmin_vi_nxv2i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 -3, i32 0 @@ -575,7 +575,7 @@ define @vmin_vv_nxv4i32( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v10 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb @@ -586,7 +586,7 @@ define @vmin_vx_nxv4i32( %va, i32 signext %b) { ; CHECK-LABEL: vmin_vx_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -600,7 +600,7 @@ ; CHECK-LABEL: vmin_vi_nxv4i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 -3, i32 0 @@ -613,7 +613,7 @@ define @vmin_vv_nxv8i32( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v12 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb @@ -624,7 +624,7 @@ define @vmin_vx_nxv8i32( %va, i32 signext %b) { ; CHECK-LABEL: vmin_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -638,7 +638,7 @@ ; CHECK-LABEL: vmin_vi_nxv8i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 -3, i32 0 @@ -651,7 +651,7 @@ define @vmin_vv_nxv16i32( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v16 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb @@ -662,7 +662,7 @@ define @vmin_vx_nxv16i32( %va, i32 signext %b) { ; CHECK-LABEL: vmin_vx_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -676,7 +676,7 @@ ; CHECK-LABEL: vmin_vi_nxv16i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, 
ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 -3, i32 0 @@ -689,7 +689,7 @@ define @vmin_vv_nxv1i64( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v9 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb @@ -705,7 +705,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vminu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -713,7 +713,7 @@ ; ; RV64-LABEL: vmin_vx_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV64-NEXT: vminu.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -727,7 +727,7 @@ ; CHECK-LABEL: vmin_vi_nxv1i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 -3, i32 0 @@ -740,7 +740,7 @@ define @vmin_vv_nxv2i64( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v10 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb @@ -756,7 +756,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vminu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -764,7 +764,7 @@ ; ; RV64-LABEL: vmin_vx_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV64-NEXT: vminu.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -778,7 +778,7 @@ ; CHECK-LABEL: vmin_vi_nxv2i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 -3, i32 0 @@ -791,7 +791,7 @@ define @vmin_vv_nxv4i64( %va, %vb) { ; CHECK-LABEL: vmin_vv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v12 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb @@ -807,7 +807,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vminu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -815,7 +815,7 @@ ; ; RV64-LABEL: vmin_vx_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV64-NEXT: vminu.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -829,7 +829,7 @@ ; CHECK-LABEL: vmin_vi_nxv4i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 -3, i32 0 @@ -842,7 +842,7 @@ define @vmin_vv_nxv8i64( %va, %vb) { ; 
CHECK-LABEL: vmin_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vminu.vv v8, v8, v16 ; CHECK-NEXT: ret %cmp = icmp ult %va, %vb @@ -858,7 +858,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vminu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -866,7 +866,7 @@ ; ; RV64-LABEL: vmin_vx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vminu.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -880,7 +880,7 @@ ; CHECK-LABEL: vmin_vi_nxv8i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -3 -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; CHECK-NEXT: vminu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 -3, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmnand.ll b/llvm/test/CodeGen/RISCV/rvv/vmnand.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmnand.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmnand.ll @@ -11,7 +11,7 @@ define @intrinsic_vmnand_mm_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnand_mm_nxv1i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmnand.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -31,7 +31,7 @@ define @intrinsic_vmnand_mm_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnand_mm_nxv2i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmnand.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -51,7 +51,7 @@ define @intrinsic_vmnand_mm_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnand_mm_nxv4i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmnand.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -71,7 +71,7 @@ define @intrinsic_vmnand_mm_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnand_mm_nxv8i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmnand.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -91,7 +91,7 @@ define @intrinsic_vmnand_mm_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnand_mm_nxv16i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmnand.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -111,7 +111,7 @@ define @intrinsic_vmnand_mm_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnand_mm_nxv32i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmnand.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -131,7 +131,7 @@ define @intrinsic_vmnand_mm_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnand_mm_nxv64i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmnand.mm v0, v0, v8 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmnor.ll b/llvm/test/CodeGen/RISCV/rvv/vmnor.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/vmnor.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmnor.ll @@ -11,7 +11,7 @@ define @intrinsic_vmnor_mm_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnor_mm_nxv1i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmnor.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -31,7 +31,7 @@ define @intrinsic_vmnor_mm_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnor_mm_nxv2i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmnor.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -51,7 +51,7 @@ define @intrinsic_vmnor_mm_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnor_mm_nxv4i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmnor.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -71,7 +71,7 @@ define @intrinsic_vmnor_mm_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnor_mm_nxv8i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmnor.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -91,7 +91,7 @@ define @intrinsic_vmnor_mm_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnor_mm_nxv16i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmnor.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -111,7 +111,7 @@ define @intrinsic_vmnor_mm_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnor_mm_nxv32i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmnor.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -131,7 +131,7 @@ define @intrinsic_vmnor_mm_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmnor_mm_nxv64i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmnor.mm v0, v0, v8 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmor.ll b/llvm/test/CodeGen/RISCV/rvv/vmor.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmor.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmor.ll @@ -11,7 +11,7 @@ define @intrinsic_vmor_mm_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmor_mm_nxv1i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmor.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -31,7 +31,7 @@ define @intrinsic_vmor_mm_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmor_mm_nxv2i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmor.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -51,7 +51,7 @@ define @intrinsic_vmor_mm_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmor_mm_nxv4i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmor.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -71,7 +71,7 @@ define @intrinsic_vmor_mm_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmor_mm_nxv8i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, 
a0, e8, m1, ta, ma ; CHECK-NEXT: vmor.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -91,7 +91,7 @@ define @intrinsic_vmor_mm_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmor_mm_nxv16i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmor.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -111,7 +111,7 @@ define @intrinsic_vmor_mm_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmor_mm_nxv32i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmor.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -131,7 +131,7 @@ define @intrinsic_vmor_mm_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmor_mm_nxv64i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmor.mm v0, v0, v8 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmorn.ll b/llvm/test/CodeGen/RISCV/rvv/vmorn.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmorn.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmorn.ll @@ -11,7 +11,7 @@ define @intrinsic_vmorn_mm_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmorn_mm_nxv1i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmorn.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -31,7 +31,7 @@ define @intrinsic_vmorn_mm_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmorn_mm_nxv2i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmorn.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -51,7 +51,7 @@ define @intrinsic_vmorn_mm_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmorn_mm_nxv4i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmorn.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -71,7 +71,7 @@ define @intrinsic_vmorn_mm_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmorn_mm_nxv8i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmorn.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -91,7 +91,7 @@ define @intrinsic_vmorn_mm_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmorn_mm_nxv16i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmorn.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -111,7 +111,7 @@ define @intrinsic_vmorn_mm_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmorn_mm_nxv32i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmorn.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -131,7 +131,7 @@ define @intrinsic_vmorn_mm_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmorn_mm_nxv64i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmorn.mm v0, v0, v8 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll @@ -9,7 +9,7 @@ define 
@intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -29,7 +29,7 @@ define @intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -49,7 +49,7 @@ define @intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -69,7 +69,7 @@ define @intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -89,7 +89,7 @@ define @intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -109,7 +109,7 @@ define @intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -129,7 +129,7 @@ define @intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v16 ; CHECK-NEXT: ret entry: @@ -149,7 +149,7 @@ define @intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -169,7 +169,7 @@ define @intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -189,7 +189,7 @@ define @intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -209,7 +209,7 @@ define @intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -229,7 +229,7 @@ define @intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -249,7 +249,7 @@ define @intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v16 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ define @intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -289,7 +289,7 @@ define @intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -309,7 +309,7 @@ define @intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -329,7 +329,7 @@ define @intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -349,7 +349,7 @@ define @intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v16 ; CHECK-NEXT: ret entry: @@ -369,7 +369,7 @@ define @intrinsic_vmsbc_vv_nxv1i1_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -389,7 +389,7 @@ define @intrinsic_vmsbc_vv_nxv2i1_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -409,7 +409,7 @@ define @intrinsic_vmsbc_vv_nxv4i1_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v12 ; 
CHECK-NEXT: ret entry: @@ -429,7 +429,7 @@ define @intrinsic_vmsbc_vv_nxv8i1_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v16 ; CHECK-NEXT: ret entry: @@ -449,7 +449,7 @@ define @intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -469,7 +469,7 @@ define @intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -489,7 +489,7 @@ define @intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -509,7 +509,7 @@ define @intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -529,7 +529,7 @@ define @intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -549,7 +549,7 @@ define @intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -569,7 +569,7 @@ define @intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -589,7 +589,7 @@ define @intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -609,7 +609,7 @@ define @intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -629,7 +629,7 @@ define @intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -649,7 +649,7 @@ define @intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -669,7 +669,7 @@ define @intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -689,7 +689,7 @@ define @intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -709,7 +709,7 @@ define @intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -729,7 +729,7 @@ define @intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -749,7 +749,7 @@ define @intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -769,7 +769,7 @@ define @intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -789,7 +789,7 @@ define @intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -813,7 +813,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vmsbc.vv v0, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -839,7 +839,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmsbc.vv v0, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -865,7 +865,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, 
sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmsbc.vv v0, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -891,7 +891,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmsbc.vv v0, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv64.ll @@ -9,7 +9,7 @@ define @intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -29,7 +29,7 @@ define @intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -49,7 +49,7 @@ define @intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -69,7 +69,7 @@ define @intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -89,7 +89,7 @@ define @intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -109,7 +109,7 @@ define @intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -129,7 +129,7 @@ define @intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v16 ; CHECK-NEXT: ret entry: @@ -149,7 +149,7 @@ define @intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -169,7 +169,7 @@ define @intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -189,7 +189,7 @@ define @intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -209,7 +209,7 @@ define @intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -229,7 +229,7 @@ define @intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -249,7 +249,7 @@ define @intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v16 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ define @intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -289,7 +289,7 @@ define @intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -309,7 +309,7 @@ define @intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -329,7 +329,7 @@ define @intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -349,7 +349,7 @@ define @intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v16 ; CHECK-NEXT: ret entry: @@ -369,7 +369,7 @@ define @intrinsic_vmsbc_vv_nxv1i1_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; 
CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -389,7 +389,7 @@ define @intrinsic_vmsbc_vv_nxv2i1_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -409,7 +409,7 @@ define @intrinsic_vmsbc_vv_nxv4i1_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -429,7 +429,7 @@ define @intrinsic_vmsbc_vv_nxv8i1_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmsbc.vv v0, v8, v16 ; CHECK-NEXT: ret entry: @@ -449,7 +449,7 @@ define @intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -469,7 +469,7 @@ define @intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -489,7 +489,7 @@ define @intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -509,7 +509,7 @@ define @intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -529,7 +529,7 @@ define @intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -549,7 +549,7 @@ define @intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -569,7 +569,7 @@ define @intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -589,7 +589,7 @@ define @intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; 
CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -609,7 +609,7 @@ define @intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -629,7 +629,7 @@ define @intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -649,7 +649,7 @@ define @intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -669,7 +669,7 @@ define @intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -689,7 +689,7 @@ define @intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -709,7 +709,7 @@ define @intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -729,7 +729,7 @@ define @intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -749,7 +749,7 @@ define @intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -769,7 +769,7 @@ define @intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -789,7 +789,7 @@ define @intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma 
; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -809,7 +809,7 @@ define @intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -829,7 +829,7 @@ define @intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -849,7 +849,7 @@ define @intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -869,7 +869,7 @@ define @intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vmsbc.vx v0, v8, a0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -33,7 +33,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -56,7 +56,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -79,7 +79,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -102,7 +102,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsbc.vvm v12, v8, v10, v0 ; CHECK-NEXT: 
vmv1r.v v0, v12 ; CHECK-NEXT: ret @@ -125,7 +125,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmsbc.vvm v16, v8, v12, v0 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ -148,7 +148,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmsbc.vvm v24, v8, v16, v0 ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: ret @@ -171,7 +171,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -194,7 +194,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -217,7 +217,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -240,7 +240,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmsbc.vvm v12, v8, v10, v0 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: ret @@ -263,7 +263,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmsbc.vvm v16, v8, v12, v0 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ -286,7 +286,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmsbc.vvm v24, v8, v16, v0 ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: ret @@ -309,7 +309,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmsbc.vvm v10, 
v8, v9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -332,7 +332,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -355,7 +355,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmsbc.vvm v12, v8, v10, v0 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: ret @@ -378,7 +378,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmsbc.vvm v16, v8, v12, v0 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ -401,7 +401,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmsbc.vvm v24, v8, v16, v0 ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: ret @@ -424,7 +424,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -447,7 +447,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i64_nxv2i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmsbc.vvm v12, v8, v10, v0 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: ret @@ -470,7 +470,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i64_nxv4i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmsbc.vvm v16, v8, v12, v0 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ -493,7 +493,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i64_nxv8i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmsbc.vvm v24, v8, v16, v0 ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: ret @@ -516,7 +516,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmsbc.vxm 
v9, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: ret @@ -539,7 +539,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: ret @@ -562,7 +562,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: ret @@ -585,7 +585,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0 ; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret @@ -608,7 +608,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmsbc.vxm v10, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -631,7 +631,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmsbc.vxm v12, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: ret @@ -654,7 +654,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmsbc.vxm v16, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ -677,7 +677,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: ret @@ -700,7 +700,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: ret @@ -723,7 +723,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0 ; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret @@ 
-746,7 +746,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmsbc.vxm v10, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -769,7 +769,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmsbc.vxm v12, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: ret @@ -792,7 +792,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vmsbc.vxm v16, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ -815,7 +815,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: ret @@ -838,7 +838,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0 ; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret @@ -861,7 +861,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmsbc.vxm v10, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -884,7 +884,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmsbc.vxm v12, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: ret @@ -907,7 +907,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vmsbc.vxm v16, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ -934,7 +934,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmsbc.vvm v9, v8, v10, v0 ; CHECK-NEXT: vmv.v.v v0, v9 @@ -963,7 +963,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; 
CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmsbc.vvm v10, v8, v12, v0 ; CHECK-NEXT: vmv1r.v v0, v10 @@ -992,7 +992,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vmsbc.vvm v12, v8, v16, v0 ; CHECK-NEXT: vmv1r.v v0, v12 @@ -1021,7 +1021,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vlse64.v v24, (a0), zero ; CHECK-NEXT: vmsbc.vvm v16, v8, v24, v0 ; CHECK-NEXT: vmv1r.v v0, v16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -33,7 +33,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -56,7 +56,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -79,7 +79,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -102,7 +102,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsbc.vvm v12, v8, v10, v0 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: ret @@ -125,7 +125,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmsbc.vvm v16, v8, v12, v0 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ -148,7 +148,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmsbc.vvm v24, v8, v16, v0 ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: ret @@ -171,7 +171,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -194,7 +194,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -217,7 +217,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -240,7 +240,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmsbc.vvm v12, v8, v10, v0 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: ret @@ -263,7 +263,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmsbc.vvm v16, v8, v12, v0 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ -286,7 +286,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmsbc.vvm v24, v8, v16, v0 ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: ret @@ -309,7 +309,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -332,7 +332,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -355,7 +355,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32( %0, %1, %2, i64 %3) 
nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmsbc.vvm v12, v8, v10, v0 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: ret @@ -378,7 +378,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmsbc.vvm v16, v8, v12, v0 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ -401,7 +401,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmsbc.vvm v24, v8, v16, v0 ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: ret @@ -424,7 +424,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0 ; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret @@ -447,7 +447,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i64_nxv2i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmsbc.vvm v12, v8, v10, v0 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: ret @@ -470,7 +470,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i64_nxv4i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmsbc.vvm v16, v8, v12, v0 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ -493,7 +493,7 @@ define @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i64_nxv8i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmsbc.vvm v24, v8, v16, v0 ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: ret @@ -516,7 +516,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: ret @@ -539,7 +539,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: ret @@ -562,7 +562,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8( %0, i8 %1, %2, i64 %3) nounwind { ; 
CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: ret @@ -585,7 +585,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0 ; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret @@ -608,7 +608,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmsbc.vxm v10, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -631,7 +631,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmsbc.vxm v12, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: ret @@ -654,7 +654,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmsbc.vxm v16, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ -677,7 +677,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: ret @@ -700,7 +700,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: ret @@ -723,7 +723,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0 ; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret @@ -746,7 +746,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmsbc.vxm v10, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -769,7 +769,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmsbc.vxm v12, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: ret @@ -792,7 +792,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vmsbc.vxm v16, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ -815,7 +815,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: ret @@ -838,7 +838,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0 ; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret @@ -861,7 +861,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmsbc.vxm v10, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -884,7 +884,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmsbc.vxm v12, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: ret @@ -907,7 +907,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vmsbc.vxm v16, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret @@ -930,7 +930,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64( %0, i64 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0 ; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret @@ -953,7 +953,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64( %0, i64 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vmsbc.vxm v10, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: ret @@ -976,7 +976,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64( %0, i64 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vmsbc.vxm v12, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: ret @@ -999,7 +999,7 @@ define @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64( %0, i64 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vmsbc.vxm v16, v8, a0, v0 ; CHECK-NEXT: vmv1r.v v0, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbf.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbf.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsbf.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsbf.ll @@ -10,7 +10,7 @@ define @intrinsic_vmsbf_m_nxv1i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_m_nxv1i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsbf.m v8, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: ret @@ -52,7 +52,7 @@ define @intrinsic_vmsbf_m_nxv2i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_m_nxv2i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmsbf.m v8, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: ret @@ -94,7 +94,7 @@ define @intrinsic_vmsbf_m_nxv4i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_m_nxv4i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsbf.m v8, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: ret @@ -136,7 +136,7 @@ define @intrinsic_vmsbf_m_nxv8i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_m_nxv8i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmsbf.m v8, v0 ; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: ret @@ -178,7 +178,7 @@ define @intrinsic_vmsbf_m_nxv16i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_m_nxv16i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsbf.m v8, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: ret @@ -220,7 +220,7 @@ define @intrinsic_vmsbf_m_nxv32i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_m_nxv32i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmsbf.m v8, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: ret @@ -262,7 +262,7 @@ define @intrinsic_vmsbf_m_nxv64i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsbf_m_nxv64i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmsbf.m v8, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll @@ -9,7 +9,7 @@ define @intrinsic_vmseq_vv_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, 
v9 ; CHECK-NEXT: ret entry: @@ -61,7 +61,7 @@ define @intrinsic_vmseq_vv_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -113,7 +113,7 @@ define @intrinsic_vmseq_vv_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -165,7 +165,7 @@ define @intrinsic_vmseq_vv_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vmseq_vv_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ define @intrinsic_vmseq_vv_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -321,7 +321,7 @@ define @intrinsic_vmseq_vv_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -373,7 +373,7 @@ define @intrinsic_vmseq_vv_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -425,7 +425,7 @@ define @intrinsic_vmseq_vv_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -477,7 +477,7 @@ define @intrinsic_vmseq_vv_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -529,7 +529,7 @@ define @intrinsic_vmseq_vv_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -581,7 +581,7 @@ define @intrinsic_vmseq_vv_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; 
CHECK-NEXT: vmseq.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -633,7 +633,7 @@ define @intrinsic_vmseq_vv_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -685,7 +685,7 @@ define @intrinsic_vmseq_vv_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -737,7 +737,7 @@ define @intrinsic_vmseq_vv_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -789,7 +789,7 @@ define @intrinsic_vmseq_vv_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -841,7 +841,7 @@ define @intrinsic_vmseq_vv_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -893,7 +893,7 @@ define @intrinsic_vmseq_vv_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -945,7 +945,7 @@ define @intrinsic_vmseq_vx_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -992,7 +992,7 @@ define @intrinsic_vmseq_vx_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1039,7 +1039,7 @@ define @intrinsic_vmseq_vx_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1086,7 +1086,7 @@ define @intrinsic_vmseq_vx_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1133,7 +1133,7 @@ define @intrinsic_vmseq_vx_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; 
CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1180,7 +1180,7 @@ define @intrinsic_vmseq_vx_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1227,7 +1227,7 @@ define @intrinsic_vmseq_vx_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1274,7 +1274,7 @@ define @intrinsic_vmseq_vx_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1321,7 +1321,7 @@ define @intrinsic_vmseq_vx_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1368,7 +1368,7 @@ define @intrinsic_vmseq_vx_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1415,7 +1415,7 @@ define @intrinsic_vmseq_vx_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1462,7 +1462,7 @@ define @intrinsic_vmseq_vx_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1509,7 +1509,7 @@ define @intrinsic_vmseq_vx_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1556,7 +1556,7 @@ define @intrinsic_vmseq_vx_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1603,7 +1603,7 @@ define @intrinsic_vmseq_vx_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1654,7 +1654,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero ; 
CHECK-NEXT: vmseq.vv v0, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1713,7 +1713,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmseq.vv v0, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -1772,7 +1772,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmseq.vv v0, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -1822,7 +1822,7 @@ define @intrinsic_vmseq_vi_nxv1i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1857,7 +1857,7 @@ define @intrinsic_vmseq_vi_nxv2i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1892,7 +1892,7 @@ define @intrinsic_vmseq_vi_nxv4i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1927,7 +1927,7 @@ define @intrinsic_vmseq_vi_nxv8i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1962,7 +1962,7 @@ define @intrinsic_vmseq_vi_nxv16i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1997,7 +1997,7 @@ define @intrinsic_vmseq_vi_nxv32i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2032,7 +2032,7 @@ define @intrinsic_vmseq_vi_nxv1i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2067,7 +2067,7 @@ define @intrinsic_vmseq_vi_nxv2i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2102,7 +2102,7 @@ define @intrinsic_vmseq_vi_nxv4i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2137,7 +2137,7 @@ define @intrinsic_vmseq_vi_nxv8i16_i16( 
%0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2172,7 +2172,7 @@ define @intrinsic_vmseq_vi_nxv16i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2207,7 +2207,7 @@ define @intrinsic_vmseq_vi_nxv1i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2242,7 +2242,7 @@ define @intrinsic_vmseq_vi_nxv2i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2277,7 +2277,7 @@ define @intrinsic_vmseq_vi_nxv4i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2312,7 +2312,7 @@ define @intrinsic_vmseq_vi_nxv8i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2347,7 +2347,7 @@ define @intrinsic_vmseq_vi_nxv1i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2382,7 +2382,7 @@ define @intrinsic_vmseq_vi_nxv2i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2417,7 +2417,7 @@ define @intrinsic_vmseq_vi_nxv4i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll @@ -9,7 +9,7 @@ define @intrinsic_vmseq_vv_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -61,7 +61,7 @@ define @intrinsic_vmseq_vv_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; 
CHECK-NEXT: vmseq.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -113,7 +113,7 @@ define @intrinsic_vmseq_vv_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -165,7 +165,7 @@ define @intrinsic_vmseq_vv_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vmseq_vv_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ define @intrinsic_vmseq_vv_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -321,7 +321,7 @@ define @intrinsic_vmseq_vv_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -373,7 +373,7 @@ define @intrinsic_vmseq_vv_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -425,7 +425,7 @@ define @intrinsic_vmseq_vv_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -477,7 +477,7 @@ define @intrinsic_vmseq_vv_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -529,7 +529,7 @@ define @intrinsic_vmseq_vv_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -581,7 +581,7 @@ define @intrinsic_vmseq_vv_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -633,7 +633,7 @@ define @intrinsic_vmseq_vv_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: 
vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -685,7 +685,7 @@ define @intrinsic_vmseq_vv_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -737,7 +737,7 @@ define @intrinsic_vmseq_vv_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -789,7 +789,7 @@ define @intrinsic_vmseq_vv_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -841,7 +841,7 @@ define @intrinsic_vmseq_vv_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -893,7 +893,7 @@ define @intrinsic_vmseq_vv_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmseq.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -945,7 +945,7 @@ define @intrinsic_vmseq_vx_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -992,7 +992,7 @@ define @intrinsic_vmseq_vx_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1039,7 +1039,7 @@ define @intrinsic_vmseq_vx_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1086,7 +1086,7 @@ define @intrinsic_vmseq_vx_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1133,7 +1133,7 @@ define @intrinsic_vmseq_vx_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1180,7 +1180,7 @@ define @intrinsic_vmseq_vx_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli 
zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1227,7 +1227,7 @@ define @intrinsic_vmseq_vx_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1274,7 +1274,7 @@ define @intrinsic_vmseq_vx_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1321,7 +1321,7 @@ define @intrinsic_vmseq_vx_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1368,7 +1368,7 @@ define @intrinsic_vmseq_vx_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1415,7 +1415,7 @@ define @intrinsic_vmseq_vx_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1462,7 +1462,7 @@ define @intrinsic_vmseq_vx_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1509,7 +1509,7 @@ define @intrinsic_vmseq_vx_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1556,7 +1556,7 @@ define @intrinsic_vmseq_vx_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1603,7 +1603,7 @@ define @intrinsic_vmseq_vx_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1650,7 +1650,7 @@ define @intrinsic_vmseq_vx_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1697,7 +1697,7 @@ define @intrinsic_vmseq_vx_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; 
CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1744,7 +1744,7 @@ define @intrinsic_vmseq_vx_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vmseq.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1786,7 +1786,7 @@ define @intrinsic_vmseq_vi_nxv1i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1821,7 +1821,7 @@ define @intrinsic_vmseq_vi_nxv2i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1856,7 +1856,7 @@ define @intrinsic_vmseq_vi_nxv4i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1891,7 +1891,7 @@ define @intrinsic_vmseq_vi_nxv8i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1926,7 +1926,7 @@ define @intrinsic_vmseq_vi_nxv16i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1961,7 +1961,7 @@ define @intrinsic_vmseq_vi_nxv32i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1996,7 +1996,7 @@ define @intrinsic_vmseq_vi_nxv1i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2031,7 +2031,7 @@ define @intrinsic_vmseq_vi_nxv2i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2066,7 +2066,7 @@ define @intrinsic_vmseq_vi_nxv4i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2101,7 +2101,7 @@ define @intrinsic_vmseq_vi_nxv8i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 9 ; CHECK-NEXT: ret 
entry: @@ -2136,7 +2136,7 @@ define @intrinsic_vmseq_vi_nxv16i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2171,7 +2171,7 @@ define @intrinsic_vmseq_vi_nxv1i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2206,7 +2206,7 @@ define @intrinsic_vmseq_vi_nxv2i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2241,7 +2241,7 @@ define @intrinsic_vmseq_vi_nxv4i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2276,7 +2276,7 @@ define @intrinsic_vmseq_vi_nxv8i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2311,7 +2311,7 @@ define @intrinsic_vmseq_vi_nxv1i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2346,7 +2346,7 @@ define @intrinsic_vmseq_vi_nxv2i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2381,7 +2381,7 @@ define @intrinsic_vmseq_vi_nxv4i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmseq.vi v0, v8, 9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmset.ll b/llvm/test/CodeGen/RISCV/rvv/vmset.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmset.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmset.ll @@ -9,7 +9,7 @@ define @intrinsic_vmset_m_pseudo_nxv1i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv1i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: ret entry: @@ -25,7 +25,7 @@ define @intrinsic_vmset_m_pseudo_nxv2i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv2i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: ret entry: @@ -41,7 +41,7 @@ define @intrinsic_vmset_m_pseudo_nxv4i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv4i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, 
ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vmset_m_pseudo_nxv8i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv8i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: ret entry: @@ -73,7 +73,7 @@ define @intrinsic_vmset_m_pseudo_nxv16i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv16i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: ret entry: @@ -89,7 +89,7 @@ define @intrinsic_vmset_m_pseudo_nxv32i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv32i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: ret entry: @@ -105,7 +105,7 @@ define @intrinsic_vmset_m_pseudo_nxv64i1(iXLen %0) nounwind { ; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv64i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll @@ -9,7 +9,7 @@ define @intrinsic_vmsge_vv_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsle.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -61,7 +61,7 @@ define @intrinsic_vmsge_vv_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmsle.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -113,7 +113,7 @@ define @intrinsic_vmsge_vv_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsle.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -165,7 +165,7 @@ define @intrinsic_vmsge_vv_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmsle.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vmsge_vv_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsle.vv v0, v10, v8 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ define @intrinsic_vmsge_vv_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmsle.vv v0, v12, v8 ; CHECK-NEXT: ret entry: @@ -321,7 +321,7 @@ define @intrinsic_vmsge_vv_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmsle.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -373,7 +373,7 @@ define @intrinsic_vmsge_vv_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmsle.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -425,7 +425,7 @@ define @intrinsic_vmsge_vv_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmsle.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -477,7 +477,7 @@ define @intrinsic_vmsge_vv_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmsle.vv v0, v10, v8 ; CHECK-NEXT: ret entry: @@ -529,7 +529,7 @@ define @intrinsic_vmsge_vv_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmsle.vv v0, v12, v8 ; CHECK-NEXT: ret entry: @@ -581,7 +581,7 @@ define @intrinsic_vmsge_vv_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmsle.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -633,7 +633,7 @@ define @intrinsic_vmsge_vv_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmsle.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -685,7 +685,7 @@ define @intrinsic_vmsge_vv_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmsle.vv v0, v10, v8 ; CHECK-NEXT: ret entry: @@ -737,7 +737,7 @@ define @intrinsic_vmsge_vv_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmsle.vv v0, v12, v8 ; CHECK-NEXT: ret entry: @@ -789,7 +789,7 @@ define @intrinsic_vmsge_vv_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmsle.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -841,7 +841,7 @@ define @intrinsic_vmsge_vv_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmsle.vv v0, v10, v8 ; CHECK-NEXT: ret entry: @@ -893,7 +893,7 @@ define @intrinsic_vmsge_vv_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmsge_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmsle.vv v0, v12, v8 ; CHECK-NEXT: ret entry: @@ -945,7 +945,7 @@ define @intrinsic_vmsge_vx_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmnot.m v0, v8 ; CHECK-NEXT: ret @@ -993,7 +993,7 @@ define @intrinsic_vmsge_vx_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmnot.m v0, v8 ; CHECK-NEXT: ret @@ -1041,7 +1041,7 @@ define @intrinsic_vmsge_vx_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmnot.m v0, v8 ; CHECK-NEXT: ret @@ -1089,7 +1089,7 @@ define @intrinsic_vmsge_vx_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmnot.m v0, v8 ; CHECK-NEXT: ret @@ -1137,7 +1137,7 @@ define @intrinsic_vmsge_vx_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmslt.vx v10, v8, a0 ; CHECK-NEXT: vmnot.m v0, v10 ; CHECK-NEXT: ret @@ -1185,7 +1185,7 @@ define @intrinsic_vmsge_vx_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmslt.vx v12, v8, a0 ; CHECK-NEXT: vmnot.m v0, v12 ; CHECK-NEXT: ret @@ -1233,7 +1233,7 @@ define @intrinsic_vmsge_vx_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmnot.m v0, v8 ; CHECK-NEXT: ret @@ -1281,7 +1281,7 @@ define @intrinsic_vmsge_vx_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmnot.m v0, v8 ; CHECK-NEXT: ret @@ -1329,7 +1329,7 @@ define @intrinsic_vmsge_vx_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmnot.m v0, v8 ; CHECK-NEXT: ret @@ -1377,7 +1377,7 @@ define @intrinsic_vmsge_vx_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, 
a1, e16, m2, ta, ma ; CHECK-NEXT: vmslt.vx v10, v8, a0 ; CHECK-NEXT: vmnot.m v0, v10 ; CHECK-NEXT: ret @@ -1425,7 +1425,7 @@ define @intrinsic_vmsge_vx_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmslt.vx v12, v8, a0 ; CHECK-NEXT: vmnot.m v0, v12 ; CHECK-NEXT: ret @@ -1473,7 +1473,7 @@ define @intrinsic_vmsge_vx_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmnot.m v0, v8 ; CHECK-NEXT: ret @@ -1521,7 +1521,7 @@ define @intrinsic_vmsge_vx_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmnot.m v0, v8 ; CHECK-NEXT: ret @@ -1569,7 +1569,7 @@ define @intrinsic_vmsge_vx_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmslt.vx v10, v8, a0 ; CHECK-NEXT: vmnot.m v0, v10 ; CHECK-NEXT: ret @@ -1617,7 +1617,7 @@ define @intrinsic_vmsge_vx_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmslt.vx v12, v8, a0 ; CHECK-NEXT: vmnot.m v0, v12 ; CHECK-NEXT: ret @@ -1669,7 +1669,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vmsle.vv v0, v9, v8 ; CHECK-NEXT: addi sp, sp, 16 @@ -1728,7 +1728,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmsle.vv v0, v10, v8 ; CHECK-NEXT: addi sp, sp, 16 @@ -1787,7 +1787,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmsle.vv v0, v12, v8 ; CHECK-NEXT: addi sp, sp, 16 @@ -1837,7 +1837,7 @@ define @intrinsic_vmsge_vi_nxv1i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, -16 ; CHECK-NEXT: ret entry: @@ -1872,7 +1872,7 @@ define @intrinsic_vmsge_vi_nxv2i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, -14 ; CHECK-NEXT: ret entry: @@ -1907,7 +1907,7 @@ define @intrinsic_vmsge_vi_nxv4i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, -12 ; CHECK-NEXT: ret entry: @@ -1942,7 +1942,7 @@ define @intrinsic_vmsge_vi_nxv8i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, -10 ; CHECK-NEXT: ret entry: @@ -1977,7 +1977,7 @@ define @intrinsic_vmsge_vi_nxv16i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, -8 ; CHECK-NEXT: ret entry: @@ -2012,7 +2012,7 @@ define @intrinsic_vmsge_vi_nxv32i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, -6 ; CHECK-NEXT: ret entry: @@ -2047,7 +2047,7 @@ define @intrinsic_vmsge_vi_nxv1i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, -4 ; CHECK-NEXT: ret entry: @@ -2082,7 +2082,7 @@ define @intrinsic_vmsge_vi_nxv2i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, -2 ; CHECK-NEXT: ret entry: @@ -2117,7 +2117,7 @@ define @intrinsic_vmsge_vi_nxv4i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, -1 ; CHECK-NEXT: ret entry: @@ -2152,7 +2152,7 @@ define @intrinsic_vmsge_vi_nxv8i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 1 ; CHECK-NEXT: ret entry: @@ -2187,7 +2187,7 @@ define @intrinsic_vmsge_vi_nxv16i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 3 ; CHECK-NEXT: ret entry: @@ -2222,7 +2222,7 @@ define @intrinsic_vmsge_vi_nxv1i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 5 ; CHECK-NEXT: ret entry: @@ -2257,7 +2257,7 @@ define @intrinsic_vmsge_vi_nxv2i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 7 ; CHECK-NEXT: ret entry: @@ -2292,7 +2292,7 @@ define @intrinsic_vmsge_vi_nxv4i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, 
m2, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2327,7 +2327,7 @@ define @intrinsic_vmsge_vi_nxv8i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 11 ; CHECK-NEXT: ret entry: @@ -2362,7 +2362,7 @@ define @intrinsic_vmsge_vi_nxv1i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 8 ; CHECK-NEXT: ret entry: @@ -2397,7 +2397,7 @@ define @intrinsic_vmsge_vi_nxv2i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 8 ; CHECK-NEXT: ret entry: @@ -2432,7 +2432,7 @@ define @intrinsic_vmsge_vi_nxv4i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 8 ; CHECK-NEXT: ret entry: @@ -2468,7 +2468,7 @@ define @intrinsic_vmsge_maskedoff_mask_vx_nxv1i8_i8( %0, %1, i8 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -2486,7 +2486,7 @@ define @intrinsic_vmsge_maskedoff_mask_vx_nxv2i8_i8( %0, %1, i8 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -2504,7 +2504,7 @@ define @intrinsic_vmsge_maskedoff_mask_vx_nxv4i8_i8( %0, %1, i8 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -2522,7 +2522,7 @@ define @intrinsic_vmsge_maskedoff_mask_vx_nxv8i8_i8( %0, %1, i8 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -2540,7 +2540,7 @@ define @intrinsic_vmsge_maskedoff_mask_vx_nxv16i8_i8( %0, %1, i8 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmslt.vx v10, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v10 ; CHECK-NEXT: ret @@ -2558,7 +2558,7 @@ define @intrinsic_vmsge_maskedoff_mask_vx_nxv32i8_i8( %0, %1, i8 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmslt.vx 
v12, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v12 ; CHECK-NEXT: ret @@ -2576,7 +2576,7 @@ define @intrinsic_vmsge_maskedoff_mask_vx_nxv1i16_i16( %0, %1, i16 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -2594,7 +2594,7 @@ define @intrinsic_vmsge_maskedoff_mask_vx_nxv2i16_i16( %0, %1, i16 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -2612,7 +2612,7 @@ define @intrinsic_vmsge_maskedoff_mask_vx_nxv4i16_i16( %0, %1, i16 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -2630,7 +2630,7 @@ define @intrinsic_vmsge_maskedoff_mask_vx_nxv8i16_i16( %0, %1, i16 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmslt.vx v10, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v10 ; CHECK-NEXT: ret @@ -2648,7 +2648,7 @@ define @intrinsic_vmsge_maskedoff_mask_vx_nxv16i16_i16( %0, %1, i16 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmslt.vx v12, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v12 ; CHECK-NEXT: ret @@ -2666,7 +2666,7 @@ define @intrinsic_vmsge_maskedoff_mask_vx_nxv1i32_i32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -2684,7 +2684,7 @@ define @intrinsic_vmsge_maskedoff_mask_vx_nxv2i32_i32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -2702,7 +2702,7 @@ define @intrinsic_vmsge_maskedoff_mask_vx_nxv4i32_i32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmslt.vx v10, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v10 ; CHECK-NEXT: ret @@ -2720,7 +2720,7 @@ define @intrinsic_vmsge_maskedoff_mask_vx_nxv8i32_i32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmslt.vx v12, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v12 ; CHECK-NEXT: 
ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll @@ -9,7 +9,7 @@ define @intrinsic_vmsge_vv_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsle.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -61,7 +61,7 @@ define @intrinsic_vmsge_vv_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmsle.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -113,7 +113,7 @@ define @intrinsic_vmsge_vv_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsle.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -165,7 +165,7 @@ define @intrinsic_vmsge_vv_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmsle.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vmsge_vv_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsle.vv v0, v10, v8 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ define @intrinsic_vmsge_vv_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmsle.vv v0, v12, v8 ; CHECK-NEXT: ret entry: @@ -321,7 +321,7 @@ define @intrinsic_vmsge_vv_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmsle.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -373,7 +373,7 @@ define @intrinsic_vmsge_vv_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmsle.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -425,7 +425,7 @@ define @intrinsic_vmsge_vv_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmsle.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -477,7 +477,7 @@ define @intrinsic_vmsge_vv_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmsle.vv v0, v10, v8 ; CHECK-NEXT: ret entry: @@ -529,7 +529,7 @@ define @intrinsic_vmsge_vv_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmsge_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmsle.vv v0, v12, v8 ; CHECK-NEXT: ret entry: @@ -581,7 +581,7 @@ define @intrinsic_vmsge_vv_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmsle.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -633,7 +633,7 @@ define @intrinsic_vmsge_vv_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmsle.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -685,7 +685,7 @@ define @intrinsic_vmsge_vv_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmsle.vv v0, v10, v8 ; CHECK-NEXT: ret entry: @@ -737,7 +737,7 @@ define @intrinsic_vmsge_vv_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmsle.vv v0, v12, v8 ; CHECK-NEXT: ret entry: @@ -789,7 +789,7 @@ define @intrinsic_vmsge_vv_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmsle.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -841,7 +841,7 @@ define @intrinsic_vmsge_vv_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmsle.vv v0, v10, v8 ; CHECK-NEXT: ret entry: @@ -893,7 +893,7 @@ define @intrinsic_vmsge_vv_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmsle.vv v0, v12, v8 ; CHECK-NEXT: ret entry: @@ -945,7 +945,7 @@ define @intrinsic_vmsge_vx_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmnot.m v0, v8 ; CHECK-NEXT: ret @@ -993,7 +993,7 @@ define @intrinsic_vmsge_vx_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmnot.m v0, v8 ; CHECK-NEXT: ret @@ -1041,7 +1041,7 @@ define @intrinsic_vmsge_vx_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmnot.m v0, v8 ; CHECK-NEXT: ret @@ -1089,7 +1089,7 @@ 
define @intrinsic_vmsge_vx_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmnot.m v0, v8 ; CHECK-NEXT: ret @@ -1137,7 +1137,7 @@ define @intrinsic_vmsge_vx_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmslt.vx v10, v8, a0 ; CHECK-NEXT: vmnot.m v0, v10 ; CHECK-NEXT: ret @@ -1185,7 +1185,7 @@ define @intrinsic_vmsge_vx_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmslt.vx v12, v8, a0 ; CHECK-NEXT: vmnot.m v0, v12 ; CHECK-NEXT: ret @@ -1233,7 +1233,7 @@ define @intrinsic_vmsge_vx_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmnot.m v0, v8 ; CHECK-NEXT: ret @@ -1281,7 +1281,7 @@ define @intrinsic_vmsge_vx_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmnot.m v0, v8 ; CHECK-NEXT: ret @@ -1329,7 +1329,7 @@ define @intrinsic_vmsge_vx_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmnot.m v0, v8 ; CHECK-NEXT: ret @@ -1377,7 +1377,7 @@ define @intrinsic_vmsge_vx_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmslt.vx v10, v8, a0 ; CHECK-NEXT: vmnot.m v0, v10 ; CHECK-NEXT: ret @@ -1425,7 +1425,7 @@ define @intrinsic_vmsge_vx_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmslt.vx v12, v8, a0 ; CHECK-NEXT: vmnot.m v0, v12 ; CHECK-NEXT: ret @@ -1473,7 +1473,7 @@ define @intrinsic_vmsge_vx_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmnot.m v0, v8 ; CHECK-NEXT: ret @@ -1521,7 +1521,7 @@ define @intrinsic_vmsge_vx_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmnot.m v0, v8 ; CHECK-NEXT: ret @@ -1569,7 +1569,7 @@ define @intrinsic_vmsge_vx_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmsge_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmslt.vx v10, v8, a0 ; CHECK-NEXT: vmnot.m v0, v10 ; CHECK-NEXT: ret @@ -1617,7 +1617,7 @@ define @intrinsic_vmsge_vx_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmslt.vx v12, v8, a0 ; CHECK-NEXT: vmnot.m v0, v12 ; CHECK-NEXT: ret @@ -1665,7 +1665,7 @@ define @intrinsic_vmsge_vx_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmnot.m v0, v8 ; CHECK-NEXT: ret @@ -1713,7 +1713,7 @@ define @intrinsic_vmsge_vx_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vmslt.vx v10, v8, a0 ; CHECK-NEXT: vmnot.m v0, v10 ; CHECK-NEXT: ret @@ -1761,7 +1761,7 @@ define @intrinsic_vmsge_vx_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vmslt.vx v12, v8, a0 ; CHECK-NEXT: vmnot.m v0, v12 ; CHECK-NEXT: ret @@ -1804,7 +1804,7 @@ define @intrinsic_vmsge_vi_nxv1i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, -16 ; CHECK-NEXT: ret entry: @@ -1839,7 +1839,7 @@ define @intrinsic_vmsge_vi_nxv2i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, -14 ; CHECK-NEXT: ret entry: @@ -1874,7 +1874,7 @@ define @intrinsic_vmsge_vi_nxv4i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, -12 ; CHECK-NEXT: ret entry: @@ -1909,7 +1909,7 @@ define @intrinsic_vmsge_vi_nxv8i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, -10 ; CHECK-NEXT: ret entry: @@ -1944,7 +1944,7 @@ define @intrinsic_vmsge_vi_nxv16i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, -8 ; CHECK-NEXT: ret entry: @@ -1979,7 +1979,7 @@ define @intrinsic_vmsge_vi_nxv32i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, -6 ; CHECK-NEXT: ret entry: @@ -2014,7 +2014,7 @@ define 
@intrinsic_vmsge_vi_nxv1i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, -4 ; CHECK-NEXT: ret entry: @@ -2049,7 +2049,7 @@ define @intrinsic_vmsge_vi_nxv2i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, -2 ; CHECK-NEXT: ret entry: @@ -2084,7 +2084,7 @@ define @intrinsic_vmsge_vi_nxv4i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, -1 ; CHECK-NEXT: ret entry: @@ -2119,7 +2119,7 @@ define @intrinsic_vmsge_vi_nxv8i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 1 ; CHECK-NEXT: ret entry: @@ -2154,7 +2154,7 @@ define @intrinsic_vmsge_vi_nxv16i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 3 ; CHECK-NEXT: ret entry: @@ -2189,7 +2189,7 @@ define @intrinsic_vmsge_vi_nxv1i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 5 ; CHECK-NEXT: ret entry: @@ -2224,7 +2224,7 @@ define @intrinsic_vmsge_vi_nxv2i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 7 ; CHECK-NEXT: ret entry: @@ -2259,7 +2259,7 @@ define @intrinsic_vmsge_vi_nxv4i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2294,7 +2294,7 @@ define @intrinsic_vmsge_vi_nxv8i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 11 ; CHECK-NEXT: ret entry: @@ -2329,7 +2329,7 @@ define @intrinsic_vmsge_vi_nxv1i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 13 ; CHECK-NEXT: ret entry: @@ -2364,7 +2364,7 @@ define @intrinsic_vmsge_vi_nxv2i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 15 ; CHECK-NEXT: ret entry: @@ -2399,7 +2399,7 @@ define @intrinsic_vmsge_vi_nxv4i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: 
intrinsic_vmsge_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, -15 ; CHECK-NEXT: ret entry: @@ -2435,7 +2435,7 @@ define @intrinsic_vmsge_maskedoff_mask_vx_nxv1i8_i8( %0, %1, i8 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -2453,7 +2453,7 @@ define @intrinsic_vmsge_maskedoff_mask_vx_nxv2i8_i8( %0, %1, i8 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -2471,7 +2471,7 @@ define @intrinsic_vmsge_maskedoff_mask_vx_nxv4i8_i8( %0, %1, i8 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -2489,7 +2489,7 @@ define @intrinsic_vmsge_maskedoff_mask_vx_nxv8i8_i8( %0, %1, i8 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -2507,7 +2507,7 @@ define @intrinsic_vmsge_maskedoff_mask_vx_nxv16i8_i8( %0, %1, i8 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmslt.vx v10, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v10 ; CHECK-NEXT: ret @@ -2525,7 +2525,7 @@ define @intrinsic_vmsge_maskedoff_mask_vx_nxv32i8_i8( %0, %1, i8 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmslt.vx v12, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v12 ; CHECK-NEXT: ret @@ -2543,7 +2543,7 @@ define @intrinsic_vmsge_maskedoff_mask_vx_nxv1i16_i16( %0, %1, i16 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -2561,7 +2561,7 @@ define @intrinsic_vmsge_maskedoff_mask_vx_nxv2i16_i16( %0, %1, i16 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -2579,7 +2579,7 @@ define @intrinsic_vmsge_maskedoff_mask_vx_nxv4i16_i16( %0, %1, i16 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli 
zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -2597,7 +2597,7 @@ define @intrinsic_vmsge_maskedoff_mask_vx_nxv8i16_i16( %0, %1, i16 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmslt.vx v10, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v10 ; CHECK-NEXT: ret @@ -2615,7 +2615,7 @@ define @intrinsic_vmsge_maskedoff_mask_vx_nxv16i16_i16( %0, %1, i16 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmslt.vx v12, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v12 ; CHECK-NEXT: ret @@ -2633,7 +2633,7 @@ define @intrinsic_vmsge_maskedoff_mask_vx_nxv1i32_i32( %0, %1, i32 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -2651,7 +2651,7 @@ define @intrinsic_vmsge_maskedoff_mask_vx_nxv2i32_i32( %0, %1, i32 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -2669,7 +2669,7 @@ define @intrinsic_vmsge_maskedoff_mask_vx_nxv4i32_i32( %0, %1, i32 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmslt.vx v10, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v10 ; CHECK-NEXT: ret @@ -2687,7 +2687,7 @@ define @intrinsic_vmsge_maskedoff_mask_vx_nxv8i32_i32( %0, %1, i32 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmslt.vx v12, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v12 ; CHECK-NEXT: ret @@ -2705,7 +2705,7 @@ define @intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vmslt.vx v8, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -2723,7 +2723,7 @@ define @intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vmslt.vx v10, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v10 ; CHECK-NEXT: ret @@ -2741,7 +2741,7 @@ define @intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vmslt.vx v12, v8, a0 ; 
CHECK-NEXT: vmandn.mm v0, v0, v12 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll @@ -9,7 +9,7 @@ define @intrinsic_vmsgeu_vv_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -61,7 +61,7 @@ define @intrinsic_vmsgeu_vv_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -113,7 +113,7 @@ define @intrinsic_vmsgeu_vv_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -165,7 +165,7 @@ define @intrinsic_vmsgeu_vv_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vmsgeu_vv_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v10, v8 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ define @intrinsic_vmsgeu_vv_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v12, v8 ; CHECK-NEXT: ret entry: @@ -321,7 +321,7 @@ define @intrinsic_vmsgeu_vv_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -373,7 +373,7 @@ define @intrinsic_vmsgeu_vv_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -425,7 +425,7 @@ define @intrinsic_vmsgeu_vv_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -477,7 +477,7 @@ define @intrinsic_vmsgeu_vv_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v10, v8 ; CHECK-NEXT: ret entry: @@ -529,7 +529,7 @@ define 
@intrinsic_vmsgeu_vv_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v12, v8 ; CHECK-NEXT: ret entry: @@ -581,7 +581,7 @@ define @intrinsic_vmsgeu_vv_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -633,7 +633,7 @@ define @intrinsic_vmsgeu_vv_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -685,7 +685,7 @@ define @intrinsic_vmsgeu_vv_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v10, v8 ; CHECK-NEXT: ret entry: @@ -737,7 +737,7 @@ define @intrinsic_vmsgeu_vv_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v12, v8 ; CHECK-NEXT: ret entry: @@ -789,7 +789,7 @@ define @intrinsic_vmsgeu_vv_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -841,7 +841,7 @@ define @intrinsic_vmsgeu_vv_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v10, v8 ; CHECK-NEXT: ret entry: @@ -893,7 +893,7 @@ define @intrinsic_vmsgeu_vv_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v12, v8 ; CHECK-NEXT: ret entry: @@ -945,7 +945,7 @@ define @intrinsic_vmsgeu_vx_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmnot.m v0, v8 ; CHECK-NEXT: ret @@ -993,7 +993,7 @@ define @intrinsic_vmsgeu_vx_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmnot.m v0, v8 ; CHECK-NEXT: ret @@ -1041,7 +1041,7 @@ define @intrinsic_vmsgeu_vx_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, 
ta, ma ; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmnot.m v0, v8 ; CHECK-NEXT: ret @@ -1089,7 +1089,7 @@ define @intrinsic_vmsgeu_vx_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmnot.m v0, v8 ; CHECK-NEXT: ret @@ -1137,7 +1137,7 @@ define @intrinsic_vmsgeu_vx_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmsltu.vx v10, v8, a0 ; CHECK-NEXT: vmnot.m v0, v10 ; CHECK-NEXT: ret @@ -1185,7 +1185,7 @@ define @intrinsic_vmsgeu_vx_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmsltu.vx v12, v8, a0 ; CHECK-NEXT: vmnot.m v0, v12 ; CHECK-NEXT: ret @@ -1233,7 +1233,7 @@ define @intrinsic_vmsgeu_vx_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmnot.m v0, v8 ; CHECK-NEXT: ret @@ -1281,7 +1281,7 @@ define @intrinsic_vmsgeu_vx_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmnot.m v0, v8 ; CHECK-NEXT: ret @@ -1329,7 +1329,7 @@ define @intrinsic_vmsgeu_vx_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmnot.m v0, v8 ; CHECK-NEXT: ret @@ -1377,7 +1377,7 @@ define @intrinsic_vmsgeu_vx_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmsltu.vx v10, v8, a0 ; CHECK-NEXT: vmnot.m v0, v10 ; CHECK-NEXT: ret @@ -1425,7 +1425,7 @@ define @intrinsic_vmsgeu_vx_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmsltu.vx v12, v8, a0 ; CHECK-NEXT: vmnot.m v0, v12 ; CHECK-NEXT: ret @@ -1473,7 +1473,7 @@ define @intrinsic_vmsgeu_vx_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmnot.m v0, v8 ; CHECK-NEXT: ret @@ -1521,7 +1521,7 @@ define @intrinsic_vmsgeu_vx_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmnot.m 
v0, v8 ; CHECK-NEXT: ret @@ -1569,7 +1569,7 @@ define @intrinsic_vmsgeu_vx_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmsltu.vx v10, v8, a0 ; CHECK-NEXT: vmnot.m v0, v10 ; CHECK-NEXT: ret @@ -1617,7 +1617,7 @@ define @intrinsic_vmsgeu_vx_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmsltu.vx v12, v8, a0 ; CHECK-NEXT: vmnot.m v0, v12 ; CHECK-NEXT: ret @@ -1669,7 +1669,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: addi sp, sp, 16 @@ -1728,7 +1728,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmsleu.vv v0, v10, v8 ; CHECK-NEXT: addi sp, sp, 16 @@ -1787,7 +1787,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmsleu.vv v0, v12, v8 ; CHECK-NEXT: addi sp, sp, 16 @@ -1837,7 +1837,7 @@ define @intrinsic_vmsgeu_vi_nxv1i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, -16 ; CHECK-NEXT: ret entry: @@ -1872,7 +1872,7 @@ define @intrinsic_vmsgeu_vi_nxv2i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, -14 ; CHECK-NEXT: ret entry: @@ -1907,7 +1907,7 @@ define @intrinsic_vmsgeu_vi_nxv4i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, -12 ; CHECK-NEXT: ret entry: @@ -1942,7 +1942,7 @@ define @intrinsic_vmsgeu_vi_nxv8i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, -10 ; CHECK-NEXT: ret entry: @@ -1977,7 +1977,7 @@ define @intrinsic_vmsgeu_vi_nxv16i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, -8 ; CHECK-NEXT: ret entry: @@ -2012,7 +2012,7 @@ define @intrinsic_vmsgeu_vi_nxv32i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, -6 ; CHECK-NEXT: ret entry: @@ -2047,7 
+2047,7 @@ define @intrinsic_vmsgeu_vi_nxv1i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, -4 ; CHECK-NEXT: ret entry: @@ -2082,7 +2082,7 @@ define @intrinsic_vmsgeu_vi_nxv2i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, -2 ; CHECK-NEXT: ret entry: @@ -2097,7 +2097,7 @@ define @intrinsic_vmsgeu_mask_vi_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmor.mm v0, v9, v0 ; CHECK-NEXT: ret entry: @@ -2129,7 +2129,7 @@ define @intrinsic_vmsgeu_vi_nxv4i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: ret entry: @@ -2164,7 +2164,7 @@ define @intrinsic_vmsgeu_vi_nxv8i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 1 ; CHECK-NEXT: ret entry: @@ -2199,7 +2199,7 @@ define @intrinsic_vmsgeu_vi_nxv16i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 3 ; CHECK-NEXT: ret entry: @@ -2234,7 +2234,7 @@ define @intrinsic_vmsgeu_vi_nxv1i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 5 ; CHECK-NEXT: ret entry: @@ -2269,7 +2269,7 @@ define @intrinsic_vmsgeu_vi_nxv2i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 7 ; CHECK-NEXT: ret entry: @@ -2304,7 +2304,7 @@ define @intrinsic_vmsgeu_vi_nxv4i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2339,7 +2339,7 @@ define @intrinsic_vmsgeu_vi_nxv8i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 11 ; CHECK-NEXT: ret entry: @@ -2374,7 +2374,7 @@ define @intrinsic_vmsgeu_vi_nxv1i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 13 ; CHECK-NEXT: ret entry: @@ -2409,7 +2409,7 @@ define 
@intrinsic_vmsgeu_vi_nxv2i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 15 ; CHECK-NEXT: ret entry: @@ -2444,7 +2444,7 @@ define @intrinsic_vmsgeu_vi_nxv4i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, -15 ; CHECK-NEXT: ret entry: @@ -2480,7 +2480,7 @@ define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i8_i8( %0, %1, i8 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -2498,7 +2498,7 @@ define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i8_i8( %0, %1, i8 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -2516,7 +2516,7 @@ define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i8_i8( %0, %1, i8 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -2534,7 +2534,7 @@ define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i8_i8( %0, %1, i8 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -2552,7 +2552,7 @@ define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i8_i8( %0, %1, i8 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmsltu.vx v10, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v10 ; CHECK-NEXT: ret @@ -2570,7 +2570,7 @@ define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv32i8_i8( %0, %1, i8 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmsltu.vx v12, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v12 ; CHECK-NEXT: ret @@ -2588,7 +2588,7 @@ define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i16_i16( %0, %1, i16 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -2606,7 +2606,7 @@ define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i16_i16( %0, %1, i16 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, 
ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -2624,7 +2624,7 @@ define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i16_i16( %0, %1, i16 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -2642,7 +2642,7 @@ define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i16_i16( %0, %1, i16 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmsltu.vx v10, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v10 ; CHECK-NEXT: ret @@ -2660,7 +2660,7 @@ define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i16_i16( %0, %1, i16 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmsltu.vx v12, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v12 ; CHECK-NEXT: ret @@ -2678,7 +2678,7 @@ define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i32_i32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -2696,7 +2696,7 @@ define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i32_i32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -2714,7 +2714,7 @@ define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i32_i32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmsltu.vx v10, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v10 ; CHECK-NEXT: ret @@ -2732,7 +2732,7 @@ define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i32_i32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmsltu.vx v12, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v12 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll @@ -9,7 +9,7 @@ define @intrinsic_vmsgeu_vv_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -61,7 +61,7 @@ define @intrinsic_vmsgeu_vv_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i8_nxv2i8: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -113,7 +113,7 @@ define @intrinsic_vmsgeu_vv_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -165,7 +165,7 @@ define @intrinsic_vmsgeu_vv_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vmsgeu_vv_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v10, v8 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ define @intrinsic_vmsgeu_vv_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v12, v8 ; CHECK-NEXT: ret entry: @@ -321,7 +321,7 @@ define @intrinsic_vmsgeu_vv_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -373,7 +373,7 @@ define @intrinsic_vmsgeu_vv_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -425,7 +425,7 @@ define @intrinsic_vmsgeu_vv_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -477,7 +477,7 @@ define @intrinsic_vmsgeu_vv_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v10, v8 ; CHECK-NEXT: ret entry: @@ -529,7 +529,7 @@ define @intrinsic_vmsgeu_vv_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v12, v8 ; CHECK-NEXT: ret entry: @@ -581,7 +581,7 @@ define @intrinsic_vmsgeu_vv_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -633,7 +633,7 @@ define @intrinsic_vmsgeu_vv_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind 
{ ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -685,7 +685,7 @@ define @intrinsic_vmsgeu_vv_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v10, v8 ; CHECK-NEXT: ret entry: @@ -737,7 +737,7 @@ define @intrinsic_vmsgeu_vv_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v12, v8 ; CHECK-NEXT: ret entry: @@ -789,7 +789,7 @@ define @intrinsic_vmsgeu_vv_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -841,7 +841,7 @@ define @intrinsic_vmsgeu_vv_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v10, v8 ; CHECK-NEXT: ret entry: @@ -893,7 +893,7 @@ define @intrinsic_vmsgeu_vv_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v12, v8 ; CHECK-NEXT: ret entry: @@ -945,7 +945,7 @@ define @intrinsic_vmsgeu_vx_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmnot.m v0, v8 ; CHECK-NEXT: ret @@ -993,7 +993,7 @@ define @intrinsic_vmsgeu_vx_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmnot.m v0, v8 ; CHECK-NEXT: ret @@ -1041,7 +1041,7 @@ define @intrinsic_vmsgeu_vx_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmnot.m v0, v8 ; CHECK-NEXT: ret @@ -1089,7 +1089,7 @@ define @intrinsic_vmsgeu_vx_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmnot.m v0, v8 ; CHECK-NEXT: ret @@ -1137,7 +1137,7 @@ define @intrinsic_vmsgeu_vx_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmsltu.vx v10, v8, 
a0 ; CHECK-NEXT: vmnot.m v0, v10 ; CHECK-NEXT: ret @@ -1185,7 +1185,7 @@ define @intrinsic_vmsgeu_vx_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmsltu.vx v12, v8, a0 ; CHECK-NEXT: vmnot.m v0, v12 ; CHECK-NEXT: ret @@ -1233,7 +1233,7 @@ define @intrinsic_vmsgeu_vx_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmnot.m v0, v8 ; CHECK-NEXT: ret @@ -1281,7 +1281,7 @@ define @intrinsic_vmsgeu_vx_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmnot.m v0, v8 ; CHECK-NEXT: ret @@ -1329,7 +1329,7 @@ define @intrinsic_vmsgeu_vx_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmnot.m v0, v8 ; CHECK-NEXT: ret @@ -1377,7 +1377,7 @@ define @intrinsic_vmsgeu_vx_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmsltu.vx v10, v8, a0 ; CHECK-NEXT: vmnot.m v0, v10 ; CHECK-NEXT: ret @@ -1425,7 +1425,7 @@ define @intrinsic_vmsgeu_vx_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmsltu.vx v12, v8, a0 ; CHECK-NEXT: vmnot.m v0, v12 ; CHECK-NEXT: ret @@ -1473,7 +1473,7 @@ define @intrinsic_vmsgeu_vx_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmnot.m v0, v8 ; CHECK-NEXT: ret @@ -1521,7 +1521,7 @@ define @intrinsic_vmsgeu_vx_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmnot.m v0, v8 ; CHECK-NEXT: ret @@ -1569,7 +1569,7 @@ define @intrinsic_vmsgeu_vx_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmsltu.vx v10, v8, a0 ; CHECK-NEXT: vmnot.m v0, v10 ; CHECK-NEXT: ret @@ -1617,7 +1617,7 @@ define @intrinsic_vmsgeu_vx_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmsltu.vx v12, v8, a0 ; CHECK-NEXT: vmnot.m v0, v12 ; CHECK-NEXT: ret 
@@ -1665,7 +1665,7 @@ define @intrinsic_vmsgeu_vx_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmnot.m v0, v8 ; CHECK-NEXT: ret @@ -1713,7 +1713,7 @@ define @intrinsic_vmsgeu_vx_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vmsltu.vx v10, v8, a0 ; CHECK-NEXT: vmnot.m v0, v10 ; CHECK-NEXT: ret @@ -1761,7 +1761,7 @@ define @intrinsic_vmsgeu_vx_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vmsltu.vx v12, v8, a0 ; CHECK-NEXT: vmnot.m v0, v12 ; CHECK-NEXT: ret @@ -1804,7 +1804,7 @@ define @intrinsic_vmsgeu_vi_nxv1i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, -16 ; CHECK-NEXT: ret entry: @@ -1839,7 +1839,7 @@ define @intrinsic_vmsgeu_vi_nxv2i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, -14 ; CHECK-NEXT: ret entry: @@ -1874,7 +1874,7 @@ define @intrinsic_vmsgeu_vi_nxv4i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, -12 ; CHECK-NEXT: ret entry: @@ -1909,7 +1909,7 @@ define @intrinsic_vmsgeu_vi_nxv8i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, -10 ; CHECK-NEXT: ret entry: @@ -1944,7 +1944,7 @@ define @intrinsic_vmsgeu_vi_nxv16i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, -8 ; CHECK-NEXT: ret entry: @@ -1979,7 +1979,7 @@ define @intrinsic_vmsgeu_vi_nxv32i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, -6 ; CHECK-NEXT: ret entry: @@ -2014,7 +2014,7 @@ define @intrinsic_vmsgeu_vi_nxv1i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, -4 ; CHECK-NEXT: ret entry: @@ -2049,7 +2049,7 @@ define @intrinsic_vmsgeu_vi_nxv2i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, 
v8, -2 ; CHECK-NEXT: ret entry: @@ -2064,7 +2064,7 @@ define @intrinsic_vmsgeu_mask_vi_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmor.mm v0, v9, v0 ; CHECK-NEXT: ret entry: @@ -2081,7 +2081,7 @@ define @intrinsic_vmsgeu_vi_nxv4i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: ret entry: @@ -2131,7 +2131,7 @@ define @intrinsic_vmsgeu_vi_nxv8i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 1 ; CHECK-NEXT: ret entry: @@ -2166,7 +2166,7 @@ define @intrinsic_vmsgeu_vi_nxv16i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 3 ; CHECK-NEXT: ret entry: @@ -2201,7 +2201,7 @@ define @intrinsic_vmsgeu_vi_nxv1i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 5 ; CHECK-NEXT: ret entry: @@ -2236,7 +2236,7 @@ define @intrinsic_vmsgeu_vi_nxv2i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 7 ; CHECK-NEXT: ret entry: @@ -2271,7 +2271,7 @@ define @intrinsic_vmsgeu_vi_nxv4i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2306,7 +2306,7 @@ define @intrinsic_vmsgeu_vi_nxv8i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 11 ; CHECK-NEXT: ret entry: @@ -2341,7 +2341,7 @@ define @intrinsic_vmsgeu_vi_nxv1i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 13 ; CHECK-NEXT: ret entry: @@ -2376,7 +2376,7 @@ define @intrinsic_vmsgeu_vi_nxv2i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 15 ; CHECK-NEXT: ret entry: @@ -2411,7 +2411,7 @@ define @intrinsic_vmsgeu_vi_nxv4i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, -15 ; CHECK-NEXT: ret entry: @@ 
-2447,7 +2447,7 @@ define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i8_i8( %0, %1, i8 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -2465,7 +2465,7 @@ define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i8_i8( %0, %1, i8 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -2483,7 +2483,7 @@ define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i8_i8( %0, %1, i8 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -2501,7 +2501,7 @@ define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i8_i8( %0, %1, i8 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -2519,7 +2519,7 @@ define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i8_i8( %0, %1, i8 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmsltu.vx v10, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v10 ; CHECK-NEXT: ret @@ -2537,7 +2537,7 @@ define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv32i8_i8( %0, %1, i8 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmsltu.vx v12, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v12 ; CHECK-NEXT: ret @@ -2555,7 +2555,7 @@ define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i16_i16( %0, %1, i16 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -2573,7 +2573,7 @@ define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i16_i16( %0, %1, i16 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -2591,7 +2591,7 @@ define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i16_i16( %0, %1, i16 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -2609,7 +2609,7 @@ define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i16_i16( 
%0, %1, i16 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmsltu.vx v10, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v10 ; CHECK-NEXT: ret @@ -2627,7 +2627,7 @@ define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i16_i16( %0, %1, i16 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmsltu.vx v12, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v12 ; CHECK-NEXT: ret @@ -2645,7 +2645,7 @@ define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i32_i32( %0, %1, i32 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -2663,7 +2663,7 @@ define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i32_i32( %0, %1, i32 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -2681,7 +2681,7 @@ define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i32_i32( %0, %1, i32 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmsltu.vx v10, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v10 ; CHECK-NEXT: ret @@ -2699,7 +2699,7 @@ define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i32_i32( %0, %1, i32 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmsltu.vx v12, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v12 ; CHECK-NEXT: ret @@ -2717,7 +2717,7 @@ define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vmsltu.vx v8, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v8 ; CHECK-NEXT: ret @@ -2735,7 +2735,7 @@ define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vmsltu.vx v10, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v10 ; CHECK-NEXT: ret @@ -2753,7 +2753,7 @@ define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vmsltu.vx v12, v8, a0 ; CHECK-NEXT: vmandn.mm v0, v0, v12 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll @@ -9,7 +9,7 @@ define @intrinsic_vmsgt_vv_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmslt.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -61,7 +61,7 @@ define @intrinsic_vmsgt_vv_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmslt.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -113,7 +113,7 @@ define @intrinsic_vmsgt_vv_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmslt.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -165,7 +165,7 @@ define @intrinsic_vmsgt_vv_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmslt.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vmsgt_vv_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmslt.vv v0, v10, v8 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ define @intrinsic_vmsgt_vv_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmslt.vv v0, v12, v8 ; CHECK-NEXT: ret entry: @@ -321,7 +321,7 @@ define @intrinsic_vmsgt_vv_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmslt.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -373,7 +373,7 @@ define @intrinsic_vmsgt_vv_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmslt.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -425,7 +425,7 @@ define @intrinsic_vmsgt_vv_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmslt.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -477,7 +477,7 @@ define @intrinsic_vmsgt_vv_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmslt.vv v0, v10, v8 ; CHECK-NEXT: ret entry: @@ -529,7 +529,7 @@ define @intrinsic_vmsgt_vv_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu 
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmslt.vv v0, v12, v8 ; CHECK-NEXT: ret entry: @@ -581,7 +581,7 @@ define @intrinsic_vmsgt_vv_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmslt.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -633,7 +633,7 @@ define @intrinsic_vmsgt_vv_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmslt.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -685,7 +685,7 @@ define @intrinsic_vmsgt_vv_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmslt.vv v0, v10, v8 ; CHECK-NEXT: ret entry: @@ -737,7 +737,7 @@ define @intrinsic_vmsgt_vv_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmslt.vv v0, v12, v8 ; CHECK-NEXT: ret entry: @@ -789,7 +789,7 @@ define @intrinsic_vmsgt_vv_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmslt.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -841,7 +841,7 @@ define @intrinsic_vmsgt_vv_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmslt.vv v0, v10, v8 ; CHECK-NEXT: ret entry: @@ -893,7 +893,7 @@ define @intrinsic_vmsgt_vv_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmslt.vv v0, v12, v8 ; CHECK-NEXT: ret entry: @@ -945,7 +945,7 @@ define @intrinsic_vmsgt_vx_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -992,7 +992,7 @@ define @intrinsic_vmsgt_vx_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1039,7 +1039,7 @@ define @intrinsic_vmsgt_vx_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1086,7 +1086,7 @@ define @intrinsic_vmsgt_vx_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, 
ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1133,7 +1133,7 @@ define @intrinsic_vmsgt_vx_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1180,7 +1180,7 @@ define @intrinsic_vmsgt_vx_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1227,7 +1227,7 @@ define @intrinsic_vmsgt_vx_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1274,7 +1274,7 @@ define @intrinsic_vmsgt_vx_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1321,7 +1321,7 @@ define @intrinsic_vmsgt_vx_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1368,7 +1368,7 @@ define @intrinsic_vmsgt_vx_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1415,7 +1415,7 @@ define @intrinsic_vmsgt_vx_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1462,7 +1462,7 @@ define @intrinsic_vmsgt_vx_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1509,7 +1509,7 @@ define @intrinsic_vmsgt_vx_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1556,7 +1556,7 @@ define @intrinsic_vmsgt_vx_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1603,7 +1603,7 @@ define @intrinsic_vmsgt_vx_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, 
e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1654,7 +1654,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vmslt.vv v0, v9, v8 ; CHECK-NEXT: addi sp, sp, 16 @@ -1713,7 +1713,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmslt.vv v0, v10, v8 ; CHECK-NEXT: addi sp, sp, 16 @@ -1772,7 +1772,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmslt.vv v0, v12, v8 ; CHECK-NEXT: addi sp, sp, 16 @@ -1822,7 +1822,7 @@ define @intrinsic_vmsgt_vi_nxv1i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1857,7 +1857,7 @@ define @intrinsic_vmsgt_vi_nxv2i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1892,7 +1892,7 @@ define @intrinsic_vmsgt_vi_nxv4i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1927,7 +1927,7 @@ define @intrinsic_vmsgt_vi_nxv8i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1962,7 +1962,7 @@ define @intrinsic_vmsgt_vi_nxv16i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1997,7 +1997,7 @@ define @intrinsic_vmsgt_vi_nxv32i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2032,7 +2032,7 @@ define @intrinsic_vmsgt_vi_nxv1i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2067,7 +2067,7 @@ define @intrinsic_vmsgt_vi_nxv2i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2102,7 
+2102,7 @@ define @intrinsic_vmsgt_vi_nxv4i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2137,7 +2137,7 @@ define @intrinsic_vmsgt_vi_nxv8i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2172,7 +2172,7 @@ define @intrinsic_vmsgt_vi_nxv16i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2207,7 +2207,7 @@ define @intrinsic_vmsgt_vi_nxv1i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2242,7 +2242,7 @@ define @intrinsic_vmsgt_vi_nxv2i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2277,7 +2277,7 @@ define @intrinsic_vmsgt_vi_nxv4i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2312,7 +2312,7 @@ define @intrinsic_vmsgt_vi_nxv8i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2347,7 +2347,7 @@ define @intrinsic_vmsgt_vi_nxv1i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2382,7 +2382,7 @@ define @intrinsic_vmsgt_vi_nxv2i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2417,7 +2417,7 @@ define @intrinsic_vmsgt_vi_nxv4i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll @@ -9,7 +9,7 @@ define @intrinsic_vmsgt_vv_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; 
CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmslt.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -61,7 +61,7 @@ define @intrinsic_vmsgt_vv_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmslt.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -113,7 +113,7 @@ define @intrinsic_vmsgt_vv_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmslt.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -165,7 +165,7 @@ define @intrinsic_vmsgt_vv_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmslt.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vmsgt_vv_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmslt.vv v0, v10, v8 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ define @intrinsic_vmsgt_vv_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmslt.vv v0, v12, v8 ; CHECK-NEXT: ret entry: @@ -321,7 +321,7 @@ define @intrinsic_vmsgt_vv_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmslt.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -373,7 +373,7 @@ define @intrinsic_vmsgt_vv_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmslt.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -425,7 +425,7 @@ define @intrinsic_vmsgt_vv_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmslt.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -477,7 +477,7 @@ define @intrinsic_vmsgt_vv_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmslt.vv v0, v10, v8 ; CHECK-NEXT: ret entry: @@ -529,7 +529,7 @@ define @intrinsic_vmsgt_vv_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmslt.vv v0, v12, v8 ; CHECK-NEXT: ret entry: @@ -581,7 +581,7 @@ define @intrinsic_vmsgt_vv_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmslt.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -633,7 +633,7 @@ define @intrinsic_vmsgt_vv_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmslt.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -685,7 +685,7 @@ define @intrinsic_vmsgt_vv_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmslt.vv v0, v10, v8 ; CHECK-NEXT: ret entry: @@ -737,7 +737,7 @@ define @intrinsic_vmsgt_vv_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmslt.vv v0, v12, v8 ; CHECK-NEXT: ret entry: @@ -789,7 +789,7 @@ define @intrinsic_vmsgt_vv_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmslt.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -841,7 +841,7 @@ define @intrinsic_vmsgt_vv_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmslt.vv v0, v10, v8 ; CHECK-NEXT: ret entry: @@ -893,7 +893,7 @@ define @intrinsic_vmsgt_vv_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmslt.vv v0, v12, v8 ; CHECK-NEXT: ret entry: @@ -945,7 +945,7 @@ define @intrinsic_vmsgt_vx_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -992,7 +992,7 @@ define @intrinsic_vmsgt_vx_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1039,7 +1039,7 @@ define @intrinsic_vmsgt_vx_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1086,7 +1086,7 @@ define @intrinsic_vmsgt_vx_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1133,7 +1133,7 @@ define @intrinsic_vmsgt_vx_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1180,7 +1180,7 @@ define @intrinsic_vmsgt_vx_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1227,7 +1227,7 @@ define @intrinsic_vmsgt_vx_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1274,7 +1274,7 @@ define @intrinsic_vmsgt_vx_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1321,7 +1321,7 @@ define @intrinsic_vmsgt_vx_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1368,7 +1368,7 @@ define @intrinsic_vmsgt_vx_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1415,7 +1415,7 @@ define @intrinsic_vmsgt_vx_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1462,7 +1462,7 @@ define @intrinsic_vmsgt_vx_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1509,7 +1509,7 @@ define @intrinsic_vmsgt_vx_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1556,7 +1556,7 @@ define @intrinsic_vmsgt_vx_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1603,7 +1603,7 @@ define @intrinsic_vmsgt_vx_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1650,7 +1650,7 @@ define @intrinsic_vmsgt_vx_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1697,7 +1697,7 @@ define @intrinsic_vmsgt_vx_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1744,7 +1744,7 @@ define @intrinsic_vmsgt_vx_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vmsgt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1786,7 +1786,7 @@ define @intrinsic_vmsgt_vi_nxv1i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1821,7 +1821,7 @@ define @intrinsic_vmsgt_vi_nxv2i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1856,7 +1856,7 @@ define @intrinsic_vmsgt_vi_nxv4i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1891,7 +1891,7 @@ define @intrinsic_vmsgt_vi_nxv8i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1926,7 +1926,7 @@ define @intrinsic_vmsgt_vi_nxv16i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1961,7 +1961,7 @@ define @intrinsic_vmsgt_vi_nxv32i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1996,7 +1996,7 @@ define @intrinsic_vmsgt_vi_nxv1i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2031,7 +2031,7 @@ define @intrinsic_vmsgt_vi_nxv2i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2066,7 +2066,7 @@ define @intrinsic_vmsgt_vi_nxv4i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; 
CHECK-NEXT: vmsgt.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2101,7 +2101,7 @@ define @intrinsic_vmsgt_vi_nxv8i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2136,7 +2136,7 @@ define @intrinsic_vmsgt_vi_nxv16i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2171,7 +2171,7 @@ define @intrinsic_vmsgt_vi_nxv1i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2206,7 +2206,7 @@ define @intrinsic_vmsgt_vi_nxv2i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2241,7 +2241,7 @@ define @intrinsic_vmsgt_vi_nxv4i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2276,7 +2276,7 @@ define @intrinsic_vmsgt_vi_nxv8i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2311,7 +2311,7 @@ define @intrinsic_vmsgt_vi_nxv1i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2346,7 +2346,7 @@ define @intrinsic_vmsgt_vi_nxv2i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2381,7 +2381,7 @@ define @intrinsic_vmsgt_vi_nxv4i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmsgt.vi v0, v8, 9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll @@ -9,7 +9,7 @@ define @intrinsic_vmsgtu_vv_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -61,7 +61,7 @@ define @intrinsic_vmsgtu_vv_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i8_nxv2i8: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -113,7 +113,7 @@ define @intrinsic_vmsgtu_vv_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -165,7 +165,7 @@ define @intrinsic_vmsgtu_vv_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vmsgtu_vv_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v10, v8 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ define @intrinsic_vmsgtu_vv_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v12, v8 ; CHECK-NEXT: ret entry: @@ -321,7 +321,7 @@ define @intrinsic_vmsgtu_vv_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -373,7 +373,7 @@ define @intrinsic_vmsgtu_vv_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -425,7 +425,7 @@ define @intrinsic_vmsgtu_vv_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -477,7 +477,7 @@ define @intrinsic_vmsgtu_vv_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v10, v8 ; CHECK-NEXT: ret entry: @@ -529,7 +529,7 @@ define @intrinsic_vmsgtu_vv_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v12, v8 ; CHECK-NEXT: ret entry: @@ -581,7 +581,7 @@ define @intrinsic_vmsgtu_vv_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -633,7 +633,7 @@ define @intrinsic_vmsgtu_vv_nxv2i32_nxv2i32( %0, %1, i32 %2) 
nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -685,7 +685,7 @@ define @intrinsic_vmsgtu_vv_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v10, v8 ; CHECK-NEXT: ret entry: @@ -737,7 +737,7 @@ define @intrinsic_vmsgtu_vv_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v12, v8 ; CHECK-NEXT: ret entry: @@ -789,7 +789,7 @@ define @intrinsic_vmsgtu_vv_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -841,7 +841,7 @@ define @intrinsic_vmsgtu_vv_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v10, v8 ; CHECK-NEXT: ret entry: @@ -893,7 +893,7 @@ define @intrinsic_vmsgtu_vv_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v12, v8 ; CHECK-NEXT: ret entry: @@ -945,7 +945,7 @@ define @intrinsic_vmsgtu_vx_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -992,7 +992,7 @@ define @intrinsic_vmsgtu_vx_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1039,7 +1039,7 @@ define @intrinsic_vmsgtu_vx_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1086,7 +1086,7 @@ define @intrinsic_vmsgtu_vx_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1133,7 +1133,7 @@ define @intrinsic_vmsgtu_vx_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1180,7 +1180,7 @@ define 
@intrinsic_vmsgtu_vx_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1227,7 +1227,7 @@ define @intrinsic_vmsgtu_vx_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1274,7 +1274,7 @@ define @intrinsic_vmsgtu_vx_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1321,7 +1321,7 @@ define @intrinsic_vmsgtu_vx_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1368,7 +1368,7 @@ define @intrinsic_vmsgtu_vx_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1415,7 +1415,7 @@ define @intrinsic_vmsgtu_vx_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1462,7 +1462,7 @@ define @intrinsic_vmsgtu_vx_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1509,7 +1509,7 @@ define @intrinsic_vmsgtu_vx_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1556,7 +1556,7 @@ define @intrinsic_vmsgtu_vx_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1603,7 +1603,7 @@ define @intrinsic_vmsgtu_vx_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1654,7 +1654,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vmsltu.vv v0, v9, v8 ; CHECK-NEXT: addi sp, sp, 16 @@ 
-1713,7 +1713,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmsltu.vv v0, v10, v8 ; CHECK-NEXT: addi sp, sp, 16 @@ -1772,7 +1772,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmsltu.vv v0, v12, v8 ; CHECK-NEXT: addi sp, sp, 16 @@ -1822,7 +1822,7 @@ define @intrinsic_vmsgtu_vi_nxv1i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1857,7 +1857,7 @@ define @intrinsic_vmsgtu_vi_nxv2i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1892,7 +1892,7 @@ define @intrinsic_vmsgtu_vi_nxv4i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1927,7 +1927,7 @@ define @intrinsic_vmsgtu_vi_nxv8i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1962,7 +1962,7 @@ define @intrinsic_vmsgtu_vi_nxv16i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1997,7 +1997,7 @@ define @intrinsic_vmsgtu_vi_nxv32i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2032,7 +2032,7 @@ define @intrinsic_vmsgtu_vi_nxv1i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2067,7 +2067,7 @@ define @intrinsic_vmsgtu_vi_nxv2i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2102,7 +2102,7 @@ define @intrinsic_vmsgtu_vi_nxv4i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2137,7 +2137,7 @@ define @intrinsic_vmsgtu_vi_nxv8i16_i16( %0, i32 %1) nounwind { ; 
CHECK-LABEL: intrinsic_vmsgtu_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2172,7 +2172,7 @@ define @intrinsic_vmsgtu_vi_nxv16i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2207,7 +2207,7 @@ define @intrinsic_vmsgtu_vi_nxv1i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2242,7 +2242,7 @@ define @intrinsic_vmsgtu_vi_nxv2i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2277,7 +2277,7 @@ define @intrinsic_vmsgtu_vi_nxv4i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2312,7 +2312,7 @@ define @intrinsic_vmsgtu_vi_nxv8i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2347,7 +2347,7 @@ define @intrinsic_vmsgtu_vi_nxv1i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2382,7 +2382,7 @@ define @intrinsic_vmsgtu_vi_nxv2i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2417,7 +2417,7 @@ define @intrinsic_vmsgtu_vi_nxv4i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll @@ -9,7 +9,7 @@ define @intrinsic_vmsgtu_vv_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -61,7 +61,7 @@ define @intrinsic_vmsgtu_vv_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, 
ta, ma ; CHECK-NEXT: vmsltu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -113,7 +113,7 @@ define @intrinsic_vmsgtu_vv_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -165,7 +165,7 @@ define @intrinsic_vmsgtu_vv_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vmsgtu_vv_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v10, v8 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ define @intrinsic_vmsgtu_vv_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v12, v8 ; CHECK-NEXT: ret entry: @@ -321,7 +321,7 @@ define @intrinsic_vmsgtu_vv_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -373,7 +373,7 @@ define @intrinsic_vmsgtu_vv_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -425,7 +425,7 @@ define @intrinsic_vmsgtu_vv_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -477,7 +477,7 @@ define @intrinsic_vmsgtu_vv_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v10, v8 ; CHECK-NEXT: ret entry: @@ -529,7 +529,7 @@ define @intrinsic_vmsgtu_vv_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v12, v8 ; CHECK-NEXT: ret entry: @@ -581,7 +581,7 @@ define @intrinsic_vmsgtu_vv_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -633,7 +633,7 @@ define @intrinsic_vmsgtu_vv_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -685,7 +685,7 @@ define @intrinsic_vmsgtu_vv_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v10, v8 ; CHECK-NEXT: ret entry: @@ -737,7 +737,7 @@ define @intrinsic_vmsgtu_vv_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v12, v8 ; CHECK-NEXT: ret entry: @@ -789,7 +789,7 @@ define @intrinsic_vmsgtu_vv_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v9, v8 ; CHECK-NEXT: ret entry: @@ -841,7 +841,7 @@ define @intrinsic_vmsgtu_vv_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v10, v8 ; CHECK-NEXT: ret entry: @@ -893,7 +893,7 @@ define @intrinsic_vmsgtu_vv_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v12, v8 ; CHECK-NEXT: ret entry: @@ -945,7 +945,7 @@ define @intrinsic_vmsgtu_vx_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -992,7 +992,7 @@ define @intrinsic_vmsgtu_vx_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1039,7 +1039,7 @@ define @intrinsic_vmsgtu_vx_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1086,7 +1086,7 @@ define @intrinsic_vmsgtu_vx_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1133,7 +1133,7 @@ define @intrinsic_vmsgtu_vx_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1180,7 +1180,7 @@ define @intrinsic_vmsgtu_vx_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1227,7 +1227,7 @@ define @intrinsic_vmsgtu_vx_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1274,7 +1274,7 @@ define @intrinsic_vmsgtu_vx_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1321,7 +1321,7 @@ define @intrinsic_vmsgtu_vx_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1368,7 +1368,7 @@ define @intrinsic_vmsgtu_vx_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1415,7 +1415,7 @@ define @intrinsic_vmsgtu_vx_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1462,7 +1462,7 @@ define @intrinsic_vmsgtu_vx_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1509,7 +1509,7 @@ define @intrinsic_vmsgtu_vx_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1556,7 +1556,7 @@ define @intrinsic_vmsgtu_vx_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1603,7 +1603,7 @@ define @intrinsic_vmsgtu_vx_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1650,7 +1650,7 @@ define @intrinsic_vmsgtu_vx_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1697,7 +1697,7 @@ define @intrinsic_vmsgtu_vx_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmsgtu_vx_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1744,7 +1744,7 @@ define @intrinsic_vmsgtu_vx_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vmsgtu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1786,7 +1786,7 @@ define @intrinsic_vmsgtu_vi_nxv1i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1821,7 +1821,7 @@ define @intrinsic_vmsgtu_vi_nxv2i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1856,7 +1856,7 @@ define @intrinsic_vmsgtu_vi_nxv4i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1891,7 +1891,7 @@ define @intrinsic_vmsgtu_vi_nxv8i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1926,7 +1926,7 @@ define @intrinsic_vmsgtu_vi_nxv16i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1961,7 +1961,7 @@ define @intrinsic_vmsgtu_vi_nxv32i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1996,7 +1996,7 @@ define @intrinsic_vmsgtu_vi_nxv1i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2031,7 +2031,7 @@ define @intrinsic_vmsgtu_vi_nxv2i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2066,7 +2066,7 @@ define @intrinsic_vmsgtu_vi_nxv4i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2101,7 +2101,7 @@ define @intrinsic_vmsgtu_vi_nxv8i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2136,7 +2136,7 @@ define @intrinsic_vmsgtu_vi_nxv16i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2171,7 +2171,7 @@ define @intrinsic_vmsgtu_vi_nxv1i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2206,7 +2206,7 @@ define @intrinsic_vmsgtu_vi_nxv2i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2241,7 +2241,7 @@ define @intrinsic_vmsgtu_vi_nxv4i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2276,7 +2276,7 @@ define @intrinsic_vmsgtu_vi_nxv8i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2311,7 +2311,7 @@ define @intrinsic_vmsgtu_vi_nxv1i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2346,7 +2346,7 @@ define @intrinsic_vmsgtu_vi_nxv2i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2381,7 +2381,7 @@ define @intrinsic_vmsgtu_vi_nxv4i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmsgtu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsif.ll b/llvm/test/CodeGen/RISCV/rvv/vmsif.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsif.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsif.ll @@ -10,7 +10,7 @@ define @intrinsic_vmsif_m_nxv1i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsif_m_nxv1i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsif.m v8, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: ret @@ -52,7 +52,7 @@ define @intrinsic_vmsif_m_nxv2i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsif_m_nxv2i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmsif.m v8, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: ret @@ -94,7 +94,7 @@ define 
@intrinsic_vmsif_m_nxv4i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsif_m_nxv4i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsif.m v8, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: ret @@ -136,7 +136,7 @@ define @intrinsic_vmsif_m_nxv8i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsif_m_nxv8i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmsif.m v8, v0 ; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: ret @@ -178,7 +178,7 @@ define @intrinsic_vmsif_m_nxv16i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsif_m_nxv16i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsif.m v8, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: ret @@ -220,7 +220,7 @@ define @intrinsic_vmsif_m_nxv32i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsif_m_nxv32i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmsif.m v8, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: ret @@ -262,7 +262,7 @@ define @intrinsic_vmsif_m_nxv64i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsif_m_nxv64i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmsif.m v8, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll @@ -9,7 +9,7 @@ define @intrinsic_vmsle_vv_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -61,7 +61,7 @@ define @intrinsic_vmsle_vv_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -113,7 +113,7 @@ define @intrinsic_vmsle_vv_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -165,7 +165,7 @@ define @intrinsic_vmsle_vv_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vmsle_vv_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ define @intrinsic_vmsle_vv_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -321,7 +321,7 @@ define @intrinsic_vmsle_vv_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -373,7 +373,7 @@ define @intrinsic_vmsle_vv_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -425,7 +425,7 @@ define @intrinsic_vmsle_vv_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -477,7 +477,7 @@ define @intrinsic_vmsle_vv_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -529,7 +529,7 @@ define @intrinsic_vmsle_vv_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -581,7 +581,7 @@ define @intrinsic_vmsle_vv_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -633,7 +633,7 @@ define @intrinsic_vmsle_vv_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -685,7 +685,7 @@ define @intrinsic_vmsle_vv_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -737,7 +737,7 @@ define @intrinsic_vmsle_vv_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -789,7 +789,7 @@ define @intrinsic_vmsle_vv_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -841,7 +841,7 @@ define @intrinsic_vmsle_vv_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmsle_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -893,7 +893,7 @@ define @intrinsic_vmsle_vv_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -945,7 +945,7 @@ define @intrinsic_vmsle_vx_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -992,7 +992,7 @@ define @intrinsic_vmsle_vx_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1039,7 +1039,7 @@ define @intrinsic_vmsle_vx_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1086,7 +1086,7 @@ define @intrinsic_vmsle_vx_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1133,7 +1133,7 @@ define @intrinsic_vmsle_vx_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1180,7 +1180,7 @@ define @intrinsic_vmsle_vx_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1227,7 +1227,7 @@ define @intrinsic_vmsle_vx_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1274,7 +1274,7 @@ define @intrinsic_vmsle_vx_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1321,7 +1321,7 @@ define @intrinsic_vmsle_vx_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1368,7 +1368,7 @@ define @intrinsic_vmsle_vx_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmsle_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1415,7 +1415,7 @@ define @intrinsic_vmsle_vx_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1462,7 +1462,7 @@ define @intrinsic_vmsle_vx_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1509,7 +1509,7 @@ define @intrinsic_vmsle_vx_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1556,7 +1556,7 @@ define @intrinsic_vmsle_vx_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1603,7 +1603,7 @@ define @intrinsic_vmsle_vx_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1654,7 +1654,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vmsle.vv v0, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1713,7 +1713,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmsle.vv v0, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -1772,7 +1772,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmsle.vv v0, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -1822,7 +1822,7 @@ define @intrinsic_vmsle_vi_nxv1i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1857,7 +1857,7 @@ define @intrinsic_vmsle_vi_nxv2i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1892,7 +1892,7 @@ define @intrinsic_vmsle_vi_nxv4i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1927,7 +1927,7 @@ define @intrinsic_vmsle_vi_nxv8i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1962,7 +1962,7 @@ define @intrinsic_vmsle_vi_nxv16i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1997,7 +1997,7 @@ define @intrinsic_vmsle_vi_nxv32i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2032,7 +2032,7 @@ define @intrinsic_vmsle_vi_nxv1i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2067,7 +2067,7 @@ define @intrinsic_vmsle_vi_nxv2i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2102,7 +2102,7 @@ define @intrinsic_vmsle_vi_nxv4i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2137,7 +2137,7 @@ define @intrinsic_vmsle_vi_nxv8i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2172,7 +2172,7 @@ define @intrinsic_vmsle_vi_nxv16i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2207,7 +2207,7 @@ define @intrinsic_vmsle_vi_nxv1i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2242,7 +2242,7 @@ define @intrinsic_vmsle_vi_nxv2i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2277,7 +2277,7 @@ define @intrinsic_vmsle_vi_nxv4i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; 
CHECK-NEXT: vmsle.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2312,7 +2312,7 @@ define @intrinsic_vmsle_vi_nxv8i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2347,7 +2347,7 @@ define @intrinsic_vmsle_vi_nxv1i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2382,7 +2382,7 @@ define @intrinsic_vmsle_vi_nxv2i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2417,7 +2417,7 @@ define @intrinsic_vmsle_vi_nxv4i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll @@ -9,7 +9,7 @@ define @intrinsic_vmsle_vv_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -61,7 +61,7 @@ define @intrinsic_vmsle_vv_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -113,7 +113,7 @@ define @intrinsic_vmsle_vv_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -165,7 +165,7 @@ define @intrinsic_vmsle_vv_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vmsle_vv_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ define @intrinsic_vmsle_vv_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -321,7 +321,7 @@ define @intrinsic_vmsle_vv_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmsle_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -373,7 +373,7 @@ define @intrinsic_vmsle_vv_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -425,7 +425,7 @@ define @intrinsic_vmsle_vv_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -477,7 +477,7 @@ define @intrinsic_vmsle_vv_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -529,7 +529,7 @@ define @intrinsic_vmsle_vv_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -581,7 +581,7 @@ define @intrinsic_vmsle_vv_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -633,7 +633,7 @@ define @intrinsic_vmsle_vv_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -685,7 +685,7 @@ define @intrinsic_vmsle_vv_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -737,7 +737,7 @@ define @intrinsic_vmsle_vv_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -789,7 +789,7 @@ define @intrinsic_vmsle_vv_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -841,7 +841,7 @@ define @intrinsic_vmsle_vv_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -893,7 +893,7 @@ define 
@intrinsic_vmsle_vv_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmsle.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -945,7 +945,7 @@ define @intrinsic_vmsle_vx_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -992,7 +992,7 @@ define @intrinsic_vmsle_vx_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1039,7 +1039,7 @@ define @intrinsic_vmsle_vx_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1086,7 +1086,7 @@ define @intrinsic_vmsle_vx_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1133,7 +1133,7 @@ define @intrinsic_vmsle_vx_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1180,7 +1180,7 @@ define @intrinsic_vmsle_vx_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1227,7 +1227,7 @@ define @intrinsic_vmsle_vx_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1274,7 +1274,7 @@ define @intrinsic_vmsle_vx_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1321,7 +1321,7 @@ define @intrinsic_vmsle_vx_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1368,7 +1368,7 @@ define @intrinsic_vmsle_vx_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1415,7 +1415,7 @@ define 
@intrinsic_vmsle_vx_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1462,7 +1462,7 @@ define @intrinsic_vmsle_vx_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1509,7 +1509,7 @@ define @intrinsic_vmsle_vx_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1556,7 +1556,7 @@ define @intrinsic_vmsle_vx_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1603,7 +1603,7 @@ define @intrinsic_vmsle_vx_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1650,7 +1650,7 @@ define @intrinsic_vmsle_vx_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1697,7 +1697,7 @@ define @intrinsic_vmsle_vx_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1744,7 +1744,7 @@ define @intrinsic_vmsle_vx_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vmsle.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1786,7 +1786,7 @@ define @intrinsic_vmsle_vi_nxv1i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1821,7 +1821,7 @@ define @intrinsic_vmsle_vi_nxv2i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1856,7 +1856,7 @@ define @intrinsic_vmsle_vi_nxv4i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1891,7 +1891,7 @@ define 
@intrinsic_vmsle_vi_nxv8i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1926,7 +1926,7 @@ define @intrinsic_vmsle_vi_nxv16i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1961,7 +1961,7 @@ define @intrinsic_vmsle_vi_nxv32i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1996,7 +1996,7 @@ define @intrinsic_vmsle_vi_nxv1i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2031,7 +2031,7 @@ define @intrinsic_vmsle_vi_nxv2i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2066,7 +2066,7 @@ define @intrinsic_vmsle_vi_nxv4i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2101,7 +2101,7 @@ define @intrinsic_vmsle_vi_nxv8i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2136,7 +2136,7 @@ define @intrinsic_vmsle_vi_nxv16i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2171,7 +2171,7 @@ define @intrinsic_vmsle_vi_nxv1i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2206,7 +2206,7 @@ define @intrinsic_vmsle_vi_nxv2i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2241,7 +2241,7 @@ define @intrinsic_vmsle_vi_nxv4i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2276,7 +2276,7 @@ define @intrinsic_vmsle_vi_nxv8i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: 
intrinsic_vmsle_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2311,7 +2311,7 @@ define @intrinsic_vmsle_vi_nxv1i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2346,7 +2346,7 @@ define @intrinsic_vmsle_vi_nxv2i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2381,7 +2381,7 @@ define @intrinsic_vmsle_vi_nxv4i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll @@ -9,7 +9,7 @@ define @intrinsic_vmsleu_vv_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -61,7 +61,7 @@ define @intrinsic_vmsleu_vv_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -113,7 +113,7 @@ define @intrinsic_vmsleu_vv_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -165,7 +165,7 @@ define @intrinsic_vmsleu_vv_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vmsleu_vv_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ define @intrinsic_vmsleu_vv_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -321,7 +321,7 @@ define @intrinsic_vmsleu_vv_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli 
zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -373,7 +373,7 @@ define @intrinsic_vmsleu_vv_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -425,7 +425,7 @@ define @intrinsic_vmsleu_vv_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -477,7 +477,7 @@ define @intrinsic_vmsleu_vv_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -529,7 +529,7 @@ define @intrinsic_vmsleu_vv_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -581,7 +581,7 @@ define @intrinsic_vmsleu_vv_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -633,7 +633,7 @@ define @intrinsic_vmsleu_vv_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -685,7 +685,7 @@ define @intrinsic_vmsleu_vv_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -737,7 +737,7 @@ define @intrinsic_vmsleu_vv_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -789,7 +789,7 @@ define @intrinsic_vmsleu_vv_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -841,7 +841,7 @@ define @intrinsic_vmsleu_vv_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -893,7 +893,7 @@ define @intrinsic_vmsleu_vv_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -945,7 +945,7 @@ define @intrinsic_vmsleu_vx_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -992,7 +992,7 @@ define @intrinsic_vmsleu_vx_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1039,7 +1039,7 @@ define @intrinsic_vmsleu_vx_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1086,7 +1086,7 @@ define @intrinsic_vmsleu_vx_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1133,7 +1133,7 @@ define @intrinsic_vmsleu_vx_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1180,7 +1180,7 @@ define @intrinsic_vmsleu_vx_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1227,7 +1227,7 @@ define @intrinsic_vmsleu_vx_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1274,7 +1274,7 @@ define @intrinsic_vmsleu_vx_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1321,7 +1321,7 @@ define @intrinsic_vmsleu_vx_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1368,7 +1368,7 @@ define @intrinsic_vmsleu_vx_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1415,7 +1415,7 @@ define @intrinsic_vmsleu_vx_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv16i16_i16: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1462,7 +1462,7 @@ define @intrinsic_vmsleu_vx_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1509,7 +1509,7 @@ define @intrinsic_vmsleu_vx_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1556,7 +1556,7 @@ define @intrinsic_vmsleu_vx_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1603,7 +1603,7 @@ define @intrinsic_vmsleu_vx_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1654,7 +1654,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vmsleu.vv v0, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1713,7 +1713,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmsleu.vv v0, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -1772,7 +1772,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmsleu.vv v0, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -1822,7 +1822,7 @@ define @intrinsic_vmsleu_vi_nxv1i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1857,7 +1857,7 @@ define @intrinsic_vmsleu_vi_nxv2i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1892,7 +1892,7 @@ define @intrinsic_vmsleu_vi_nxv4i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1927,7 +1927,7 @@ define @intrinsic_vmsleu_vi_nxv8i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, 
m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1962,7 +1962,7 @@ define @intrinsic_vmsleu_vi_nxv16i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1997,7 +1997,7 @@ define @intrinsic_vmsleu_vi_nxv32i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2032,7 +2032,7 @@ define @intrinsic_vmsleu_vi_nxv1i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2067,7 +2067,7 @@ define @intrinsic_vmsleu_vi_nxv2i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2102,7 +2102,7 @@ define @intrinsic_vmsleu_vi_nxv4i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2137,7 +2137,7 @@ define @intrinsic_vmsleu_vi_nxv8i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2172,7 +2172,7 @@ define @intrinsic_vmsleu_vi_nxv16i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2207,7 +2207,7 @@ define @intrinsic_vmsleu_vi_nxv1i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2242,7 +2242,7 @@ define @intrinsic_vmsleu_vi_nxv2i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2277,7 +2277,7 @@ define @intrinsic_vmsleu_vi_nxv4i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2312,7 +2312,7 @@ define @intrinsic_vmsleu_vi_nxv8i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, 
ma ; CHECK-NEXT: vmsleu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2347,7 +2347,7 @@ define @intrinsic_vmsleu_vi_nxv1i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2382,7 +2382,7 @@ define @intrinsic_vmsleu_vi_nxv2i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2417,7 +2417,7 @@ define @intrinsic_vmsleu_vi_nxv4i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll @@ -9,7 +9,7 @@ define @intrinsic_vmsleu_vv_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -61,7 +61,7 @@ define @intrinsic_vmsleu_vv_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -113,7 +113,7 @@ define @intrinsic_vmsleu_vv_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -165,7 +165,7 @@ define @intrinsic_vmsleu_vv_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vmsleu_vv_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ define @intrinsic_vmsleu_vv_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -321,7 +321,7 @@ define @intrinsic_vmsleu_vv_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -373,7 +373,7 @@ define 
@intrinsic_vmsleu_vv_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -425,7 +425,7 @@ define @intrinsic_vmsleu_vv_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -477,7 +477,7 @@ define @intrinsic_vmsleu_vv_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -529,7 +529,7 @@ define @intrinsic_vmsleu_vv_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -581,7 +581,7 @@ define @intrinsic_vmsleu_vv_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -633,7 +633,7 @@ define @intrinsic_vmsleu_vv_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -685,7 +685,7 @@ define @intrinsic_vmsleu_vv_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -737,7 +737,7 @@ define @intrinsic_vmsleu_vv_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -789,7 +789,7 @@ define @intrinsic_vmsleu_vv_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -841,7 +841,7 @@ define @intrinsic_vmsleu_vv_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmsleu.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -893,7 +893,7 @@ define @intrinsic_vmsleu_vv_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; 
CHECK-NEXT: vmsleu.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -945,7 +945,7 @@ define @intrinsic_vmsleu_vx_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -992,7 +992,7 @@ define @intrinsic_vmsleu_vx_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1039,7 +1039,7 @@ define @intrinsic_vmsleu_vx_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1086,7 +1086,7 @@ define @intrinsic_vmsleu_vx_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1133,7 +1133,7 @@ define @intrinsic_vmsleu_vx_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1180,7 +1180,7 @@ define @intrinsic_vmsleu_vx_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1227,7 +1227,7 @@ define @intrinsic_vmsleu_vx_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1274,7 +1274,7 @@ define @intrinsic_vmsleu_vx_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1321,7 +1321,7 @@ define @intrinsic_vmsleu_vx_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1368,7 +1368,7 @@ define @intrinsic_vmsleu_vx_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1415,7 +1415,7 @@ define @intrinsic_vmsleu_vx_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, 
m4, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1462,7 +1462,7 @@ define @intrinsic_vmsleu_vx_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1509,7 +1509,7 @@ define @intrinsic_vmsleu_vx_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1556,7 +1556,7 @@ define @intrinsic_vmsleu_vx_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1603,7 +1603,7 @@ define @intrinsic_vmsleu_vx_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1650,7 +1650,7 @@ define @intrinsic_vmsleu_vx_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1697,7 +1697,7 @@ define @intrinsic_vmsleu_vx_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1744,7 +1744,7 @@ define @intrinsic_vmsleu_vx_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vmsleu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1786,7 +1786,7 @@ define @intrinsic_vmsleu_vi_nxv1i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1821,7 +1821,7 @@ define @intrinsic_vmsleu_vi_nxv2i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1856,7 +1856,7 @@ define @intrinsic_vmsleu_vi_nxv4i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1891,7 +1891,7 @@ define @intrinsic_vmsleu_vi_nxv8i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, 
ma ; CHECK-NEXT: vmsleu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1926,7 +1926,7 @@ define @intrinsic_vmsleu_vi_nxv16i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1961,7 +1961,7 @@ define @intrinsic_vmsleu_vi_nxv32i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1996,7 +1996,7 @@ define @intrinsic_vmsleu_vi_nxv1i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2031,7 +2031,7 @@ define @intrinsic_vmsleu_vi_nxv2i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2066,7 +2066,7 @@ define @intrinsic_vmsleu_vi_nxv4i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2101,7 +2101,7 @@ define @intrinsic_vmsleu_vi_nxv8i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2136,7 +2136,7 @@ define @intrinsic_vmsleu_vi_nxv16i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2171,7 +2171,7 @@ define @intrinsic_vmsleu_vi_nxv1i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2206,7 +2206,7 @@ define @intrinsic_vmsleu_vi_nxv2i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2241,7 +2241,7 @@ define @intrinsic_vmsleu_vi_nxv4i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2276,7 +2276,7 @@ define @intrinsic_vmsleu_vi_nxv8i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 9 ; CHECK-NEXT: ret 
entry: @@ -2311,7 +2311,7 @@ define @intrinsic_vmsleu_vi_nxv1i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2346,7 +2346,7 @@ define @intrinsic_vmsleu_vi_nxv2i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2381,7 +2381,7 @@ define @intrinsic_vmsleu_vi_nxv4i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll @@ -9,7 +9,7 @@ define @intrinsic_vmslt_vv_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -61,7 +61,7 @@ define @intrinsic_vmslt_vv_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -113,7 +113,7 @@ define @intrinsic_vmslt_vv_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -165,7 +165,7 @@ define @intrinsic_vmslt_vv_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vmslt_vv_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ define @intrinsic_vmslt_vv_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -321,7 +321,7 @@ define @intrinsic_vmslt_vv_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -373,7 +373,7 @@ define @intrinsic_vmslt_vv_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmslt_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -425,7 +425,7 @@ define @intrinsic_vmslt_vv_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -477,7 +477,7 @@ define @intrinsic_vmslt_vv_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -529,7 +529,7 @@ define @intrinsic_vmslt_vv_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -581,7 +581,7 @@ define @intrinsic_vmslt_vv_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -633,7 +633,7 @@ define @intrinsic_vmslt_vv_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -685,7 +685,7 @@ define @intrinsic_vmslt_vv_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -737,7 +737,7 @@ define @intrinsic_vmslt_vv_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -789,7 +789,7 @@ define @intrinsic_vmslt_vv_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -841,7 +841,7 @@ define @intrinsic_vmslt_vv_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -893,7 +893,7 @@ define @intrinsic_vmslt_vv_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -945,7 +945,7 @@ define @intrinsic_vmslt_vx_nxv1i8_i8( 
%0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -992,7 +992,7 @@ define @intrinsic_vmslt_vx_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1039,7 +1039,7 @@ define @intrinsic_vmslt_vx_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1086,7 +1086,7 @@ define @intrinsic_vmslt_vx_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1133,7 +1133,7 @@ define @intrinsic_vmslt_vx_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1180,7 +1180,7 @@ define @intrinsic_vmslt_vx_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1227,7 +1227,7 @@ define @intrinsic_vmslt_vx_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1274,7 +1274,7 @@ define @intrinsic_vmslt_vx_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1321,7 +1321,7 @@ define @intrinsic_vmslt_vx_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1368,7 +1368,7 @@ define @intrinsic_vmslt_vx_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1415,7 +1415,7 @@ define @intrinsic_vmslt_vx_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1462,7 +1462,7 @@ define @intrinsic_vmslt_vx_nxv1i32_i32( %0, i32 %1, i32 
%2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1509,7 +1509,7 @@ define @intrinsic_vmslt_vx_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1556,7 +1556,7 @@ define @intrinsic_vmslt_vx_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1603,7 +1603,7 @@ define @intrinsic_vmslt_vx_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1654,7 +1654,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vmslt.vv v0, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1713,7 +1713,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmslt.vv v0, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -1772,7 +1772,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmslt.vv v0, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -1822,7 +1822,7 @@ define @intrinsic_vmslt_vi_nxv1i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, -16 ; CHECK-NEXT: ret entry: @@ -1857,7 +1857,7 @@ define @intrinsic_vmslt_vi_nxv2i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, -14 ; CHECK-NEXT: ret entry: @@ -1892,7 +1892,7 @@ define @intrinsic_vmslt_vi_nxv4i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, -12 ; CHECK-NEXT: ret entry: @@ -1927,7 +1927,7 @@ define @intrinsic_vmslt_vi_nxv8i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, -10 ; CHECK-NEXT: ret entry: @@ -1962,7 +1962,7 @@ define @intrinsic_vmslt_vi_nxv16i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, -8 ; CHECK-NEXT: ret entry: @@ -1997,7 +1997,7 @@ define @intrinsic_vmslt_vi_nxv32i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, -6 ; CHECK-NEXT: ret entry: @@ -2032,7 +2032,7 @@ define @intrinsic_vmslt_vi_nxv1i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, -4 ; CHECK-NEXT: ret entry: @@ -2067,7 +2067,7 @@ define @intrinsic_vmslt_vi_nxv2i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, -2 ; CHECK-NEXT: ret entry: @@ -2102,7 +2102,7 @@ define @intrinsic_vmslt_vi_nxv4i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, zero ; CHECK-NEXT: ret entry: @@ -2137,7 +2137,7 @@ define @intrinsic_vmslt_vi_nxv8i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 1 ; CHECK-NEXT: ret entry: @@ -2172,7 +2172,7 @@ define @intrinsic_vmslt_vi_nxv16i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 3 ; CHECK-NEXT: ret entry: @@ -2207,7 +2207,7 @@ define @intrinsic_vmslt_vi_nxv1i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 5 ; CHECK-NEXT: ret entry: @@ -2242,7 +2242,7 @@ define @intrinsic_vmslt_vi_nxv2i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 7 ; CHECK-NEXT: ret entry: @@ -2277,7 +2277,7 @@ define @intrinsic_vmslt_vi_nxv4i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2312,7 +2312,7 @@ define @intrinsic_vmslt_vi_nxv8i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 11 ; CHECK-NEXT: ret entry: @@ -2347,7 +2347,7 @@ define @intrinsic_vmslt_vi_nxv1i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, 
e64, m1, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 8 ; CHECK-NEXT: ret entry: @@ -2382,7 +2382,7 @@ define @intrinsic_vmslt_vi_nxv2i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 8 ; CHECK-NEXT: ret entry: @@ -2417,7 +2417,7 @@ define @intrinsic_vmslt_vi_nxv4i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 8 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll @@ -9,7 +9,7 @@ define @intrinsic_vmslt_vv_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -61,7 +61,7 @@ define @intrinsic_vmslt_vv_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -113,7 +113,7 @@ define @intrinsic_vmslt_vv_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -165,7 +165,7 @@ define @intrinsic_vmslt_vv_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vmslt_vv_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ define @intrinsic_vmslt_vv_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -321,7 +321,7 @@ define @intrinsic_vmslt_vv_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -373,7 +373,7 @@ define @intrinsic_vmslt_vv_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -425,7 +425,7 @@ define @intrinsic_vmslt_vv_nxv4i16_nxv4i16( %0, 
%1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -477,7 +477,7 @@ define @intrinsic_vmslt_vv_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -529,7 +529,7 @@ define @intrinsic_vmslt_vv_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -581,7 +581,7 @@ define @intrinsic_vmslt_vv_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -633,7 +633,7 @@ define @intrinsic_vmslt_vv_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -685,7 +685,7 @@ define @intrinsic_vmslt_vv_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -737,7 +737,7 @@ define @intrinsic_vmslt_vv_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -789,7 +789,7 @@ define @intrinsic_vmslt_vv_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -841,7 +841,7 @@ define @intrinsic_vmslt_vv_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -893,7 +893,7 @@ define @intrinsic_vmslt_vv_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmslt.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -945,7 +945,7 @@ define @intrinsic_vmslt_vx_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -992,7 +992,7 @@ define 
@intrinsic_vmslt_vx_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1039,7 +1039,7 @@ define @intrinsic_vmslt_vx_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1086,7 +1086,7 @@ define @intrinsic_vmslt_vx_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1133,7 +1133,7 @@ define @intrinsic_vmslt_vx_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1180,7 +1180,7 @@ define @intrinsic_vmslt_vx_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1227,7 +1227,7 @@ define @intrinsic_vmslt_vx_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1274,7 +1274,7 @@ define @intrinsic_vmslt_vx_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1321,7 +1321,7 @@ define @intrinsic_vmslt_vx_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1368,7 +1368,7 @@ define @intrinsic_vmslt_vx_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1415,7 +1415,7 @@ define @intrinsic_vmslt_vx_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1462,7 +1462,7 @@ define @intrinsic_vmslt_vx_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1509,7 +1509,7 @@ define 
@intrinsic_vmslt_vx_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1556,7 +1556,7 @@ define @intrinsic_vmslt_vx_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1603,7 +1603,7 @@ define @intrinsic_vmslt_vx_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1650,7 +1650,7 @@ define @intrinsic_vmslt_vx_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1697,7 +1697,7 @@ define @intrinsic_vmslt_vx_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1744,7 +1744,7 @@ define @intrinsic_vmslt_vx_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1786,7 +1786,7 @@ define @intrinsic_vmslt_vi_nxv1i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, -16 ; CHECK-NEXT: ret entry: @@ -1821,7 +1821,7 @@ define @intrinsic_vmslt_vi_nxv2i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, -14 ; CHECK-NEXT: ret entry: @@ -1856,7 +1856,7 @@ define @intrinsic_vmslt_vi_nxv4i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, -12 ; CHECK-NEXT: ret entry: @@ -1891,7 +1891,7 @@ define @intrinsic_vmslt_vi_nxv8i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, -10 ; CHECK-NEXT: ret entry: @@ -1926,7 +1926,7 @@ define @intrinsic_vmslt_vi_nxv16i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, -8 ; CHECK-NEXT: ret entry: @@ -1961,7 +1961,7 @@ define @intrinsic_vmslt_vi_nxv32i8_i8( %0, i64 %1) 
nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, -6 ; CHECK-NEXT: ret entry: @@ -1996,7 +1996,7 @@ define @intrinsic_vmslt_vi_nxv1i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, -4 ; CHECK-NEXT: ret entry: @@ -2031,7 +2031,7 @@ define @intrinsic_vmslt_vi_nxv2i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, -2 ; CHECK-NEXT: ret entry: @@ -2066,7 +2066,7 @@ define @intrinsic_vmslt_vi_nxv4i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmslt.vx v0, v8, zero ; CHECK-NEXT: ret entry: @@ -2101,7 +2101,7 @@ define @intrinsic_vmslt_vi_nxv8i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 1 ; CHECK-NEXT: ret entry: @@ -2136,7 +2136,7 @@ define @intrinsic_vmslt_vi_nxv16i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 3 ; CHECK-NEXT: ret entry: @@ -2171,7 +2171,7 @@ define @intrinsic_vmslt_vi_nxv1i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 5 ; CHECK-NEXT: ret entry: @@ -2206,7 +2206,7 @@ define @intrinsic_vmslt_vi_nxv2i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 7 ; CHECK-NEXT: ret entry: @@ -2241,7 +2241,7 @@ define @intrinsic_vmslt_vi_nxv4i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2276,7 +2276,7 @@ define @intrinsic_vmslt_vi_nxv8i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 11 ; CHECK-NEXT: ret entry: @@ -2311,7 +2311,7 @@ define @intrinsic_vmslt_vi_nxv1i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 13 ; CHECK-NEXT: ret entry: @@ -2346,7 +2346,7 @@ define @intrinsic_vmslt_vi_nxv2i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i64_i64: ; CHECK: # %bb.0: 
# %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, 15 ; CHECK-NEXT: ret entry: @@ -2381,7 +2381,7 @@ define @intrinsic_vmslt_vi_nxv4i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmsle.vi v0, v8, -15 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll @@ -9,7 +9,7 @@ define @intrinsic_vmsltu_vv_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -61,7 +61,7 @@ define @intrinsic_vmsltu_vv_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -113,7 +113,7 @@ define @intrinsic_vmsltu_vv_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -165,7 +165,7 @@ define @intrinsic_vmsltu_vv_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vmsltu_vv_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ define @intrinsic_vmsltu_vv_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -321,7 +321,7 @@ define @intrinsic_vmsltu_vv_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -373,7 +373,7 @@ define @intrinsic_vmsltu_vv_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -425,7 +425,7 @@ define @intrinsic_vmsltu_vv_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, 
ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -477,7 +477,7 @@ define @intrinsic_vmsltu_vv_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -529,7 +529,7 @@ define @intrinsic_vmsltu_vv_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -581,7 +581,7 @@ define @intrinsic_vmsltu_vv_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -633,7 +633,7 @@ define @intrinsic_vmsltu_vv_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -685,7 +685,7 @@ define @intrinsic_vmsltu_vv_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -737,7 +737,7 @@ define @intrinsic_vmsltu_vv_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -789,7 +789,7 @@ define @intrinsic_vmsltu_vv_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -841,7 +841,7 @@ define @intrinsic_vmsltu_vv_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -893,7 +893,7 @@ define @intrinsic_vmsltu_vv_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -945,7 +945,7 @@ define @intrinsic_vmsltu_vx_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -992,7 +992,7 @@ define @intrinsic_vmsltu_vx_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, 
e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1039,7 +1039,7 @@ define @intrinsic_vmsltu_vx_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1086,7 +1086,7 @@ define @intrinsic_vmsltu_vx_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1133,7 +1133,7 @@ define @intrinsic_vmsltu_vx_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1180,7 +1180,7 @@ define @intrinsic_vmsltu_vx_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1227,7 +1227,7 @@ define @intrinsic_vmsltu_vx_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1274,7 +1274,7 @@ define @intrinsic_vmsltu_vx_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1321,7 +1321,7 @@ define @intrinsic_vmsltu_vx_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1368,7 +1368,7 @@ define @intrinsic_vmsltu_vx_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1415,7 +1415,7 @@ define @intrinsic_vmsltu_vx_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1462,7 +1462,7 @@ define @intrinsic_vmsltu_vx_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1509,7 +1509,7 @@ define @intrinsic_vmsltu_vx_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1556,7 +1556,7 @@ define @intrinsic_vmsltu_vx_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1603,7 +1603,7 @@ define @intrinsic_vmsltu_vx_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1654,7 +1654,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vmsltu.vv v0, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1713,7 +1713,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmsltu.vv v0, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -1772,7 +1772,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmsltu.vv v0, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -1822,7 +1822,7 @@ define @intrinsic_vmsltu_vi_nxv1i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, -16 ; CHECK-NEXT: ret entry: @@ -1857,7 +1857,7 @@ define @intrinsic_vmsltu_vi_nxv2i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, -14 ; CHECK-NEXT: ret entry: @@ -1892,7 +1892,7 @@ define @intrinsic_vmsltu_vi_nxv4i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, -12 ; CHECK-NEXT: ret entry: @@ -1927,7 +1927,7 @@ define @intrinsic_vmsltu_vi_nxv8i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, -10 ; CHECK-NEXT: ret entry: @@ -1962,7 +1962,7 @@ define @intrinsic_vmsltu_vi_nxv16i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, -8 ; CHECK-NEXT: ret entry: @@ -1997,7 +1997,7 @@ define @intrinsic_vmsltu_vi_nxv32i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, 
m4, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, -6 ; CHECK-NEXT: ret entry: @@ -2032,7 +2032,7 @@ define @intrinsic_vmsltu_vi_nxv1i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, -4 ; CHECK-NEXT: ret entry: @@ -2067,7 +2067,7 @@ define @intrinsic_vmsltu_vi_nxv2i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, -2 ; CHECK-NEXT: ret entry: @@ -2102,7 +2102,7 @@ define @intrinsic_vmsltu_vi_nxv4i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, zero ; CHECK-NEXT: ret entry: @@ -2137,7 +2137,7 @@ define @intrinsic_vmsltu_vi_nxv8i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 1 ; CHECK-NEXT: ret entry: @@ -2172,7 +2172,7 @@ define @intrinsic_vmsltu_vi_nxv16i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 3 ; CHECK-NEXT: ret entry: @@ -2207,7 +2207,7 @@ define @intrinsic_vmsltu_vi_nxv1i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 5 ; CHECK-NEXT: ret entry: @@ -2242,7 +2242,7 @@ define @intrinsic_vmsltu_vi_nxv2i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 7 ; CHECK-NEXT: ret entry: @@ -2277,7 +2277,7 @@ define @intrinsic_vmsltu_vi_nxv4i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2312,7 +2312,7 @@ define @intrinsic_vmsltu_vi_nxv8i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 11 ; CHECK-NEXT: ret entry: @@ -2347,7 +2347,7 @@ define @intrinsic_vmsltu_vi_nxv1i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 13 ; CHECK-NEXT: ret entry: @@ -2382,7 +2382,7 @@ define @intrinsic_vmsltu_vi_nxv2i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmsleu.vi v0, 
v8, 15 ; CHECK-NEXT: ret entry: @@ -2417,7 +2417,7 @@ define @intrinsic_vmsltu_vi_nxv4i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, -15 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll @@ -9,7 +9,7 @@ define @intrinsic_vmsltu_vv_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -61,7 +61,7 @@ define @intrinsic_vmsltu_vv_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -113,7 +113,7 @@ define @intrinsic_vmsltu_vv_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -165,7 +165,7 @@ define @intrinsic_vmsltu_vv_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vmsltu_vv_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ define @intrinsic_vmsltu_vv_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -321,7 +321,7 @@ define @intrinsic_vmsltu_vv_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -373,7 +373,7 @@ define @intrinsic_vmsltu_vv_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -425,7 +425,7 @@ define @intrinsic_vmsltu_vv_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -477,7 +477,7 @@ define @intrinsic_vmsltu_vv_nxv8i16_nxv8i16( 
%0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -529,7 +529,7 @@ define @intrinsic_vmsltu_vv_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -581,7 +581,7 @@ define @intrinsic_vmsltu_vv_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -633,7 +633,7 @@ define @intrinsic_vmsltu_vv_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -685,7 +685,7 @@ define @intrinsic_vmsltu_vv_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -737,7 +737,7 @@ define @intrinsic_vmsltu_vv_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -789,7 +789,7 @@ define @intrinsic_vmsltu_vv_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -841,7 +841,7 @@ define @intrinsic_vmsltu_vv_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -893,7 +893,7 @@ define @intrinsic_vmsltu_vv_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmsltu.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -945,7 +945,7 @@ define @intrinsic_vmsltu_vx_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -992,7 +992,7 @@ define @intrinsic_vmsltu_vx_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ 
-1039,7 +1039,7 @@ define @intrinsic_vmsltu_vx_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1086,7 +1086,7 @@ define @intrinsic_vmsltu_vx_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1133,7 +1133,7 @@ define @intrinsic_vmsltu_vx_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1180,7 +1180,7 @@ define @intrinsic_vmsltu_vx_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1227,7 +1227,7 @@ define @intrinsic_vmsltu_vx_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1274,7 +1274,7 @@ define @intrinsic_vmsltu_vx_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1321,7 +1321,7 @@ define @intrinsic_vmsltu_vx_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1368,7 +1368,7 @@ define @intrinsic_vmsltu_vx_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1415,7 +1415,7 @@ define @intrinsic_vmsltu_vx_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1462,7 +1462,7 @@ define @intrinsic_vmsltu_vx_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1509,7 +1509,7 @@ define @intrinsic_vmsltu_vx_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 
; CHECK-NEXT: ret entry: @@ -1556,7 +1556,7 @@ define @intrinsic_vmsltu_vx_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1603,7 +1603,7 @@ define @intrinsic_vmsltu_vx_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1650,7 +1650,7 @@ define @intrinsic_vmsltu_vx_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1697,7 +1697,7 @@ define @intrinsic_vmsltu_vx_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1744,7 +1744,7 @@ define @intrinsic_vmsltu_vx_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1786,7 +1786,7 @@ define @intrinsic_vmsltu_vi_nxv1i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, -16 ; CHECK-NEXT: ret entry: @@ -1821,7 +1821,7 @@ define @intrinsic_vmsltu_vi_nxv2i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, -14 ; CHECK-NEXT: ret entry: @@ -1856,7 +1856,7 @@ define @intrinsic_vmsltu_vi_nxv4i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, -12 ; CHECK-NEXT: ret entry: @@ -1891,7 +1891,7 @@ define @intrinsic_vmsltu_vi_nxv8i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, -10 ; CHECK-NEXT: ret entry: @@ -1926,7 +1926,7 @@ define @intrinsic_vmsltu_vi_nxv16i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, -8 ; CHECK-NEXT: ret entry: @@ -1961,7 +1961,7 @@ define @intrinsic_vmsltu_vi_nxv32i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, -6 ; CHECK-NEXT: ret entry: @@ 
-1996,7 +1996,7 @@ define @intrinsic_vmsltu_vi_nxv1i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, -4 ; CHECK-NEXT: ret entry: @@ -2031,7 +2031,7 @@ define @intrinsic_vmsltu_vi_nxv2i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, -2 ; CHECK-NEXT: ret entry: @@ -2066,7 +2066,7 @@ define @intrinsic_vmsltu_vi_nxv4i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmsltu.vx v0, v8, zero ; CHECK-NEXT: ret entry: @@ -2101,7 +2101,7 @@ define @intrinsic_vmsltu_vi_nxv8i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 1 ; CHECK-NEXT: ret entry: @@ -2136,7 +2136,7 @@ define @intrinsic_vmsltu_vi_nxv16i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 3 ; CHECK-NEXT: ret entry: @@ -2171,7 +2171,7 @@ define @intrinsic_vmsltu_vi_nxv1i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 5 ; CHECK-NEXT: ret entry: @@ -2206,7 +2206,7 @@ define @intrinsic_vmsltu_vi_nxv2i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 7 ; CHECK-NEXT: ret entry: @@ -2241,7 +2241,7 @@ define @intrinsic_vmsltu_vi_nxv4i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2276,7 +2276,7 @@ define @intrinsic_vmsltu_vi_nxv8i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 11 ; CHECK-NEXT: ret entry: @@ -2311,7 +2311,7 @@ define @intrinsic_vmsltu_vi_nxv1i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 13 ; CHECK-NEXT: ret entry: @@ -2346,7 +2346,7 @@ define @intrinsic_vmsltu_vi_nxv2i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, 15 ; CHECK-NEXT: ret entry: @@ -2381,7 +2381,7 @@ define 
@intrinsic_vmsltu_vi_nxv4i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmsleu.vi v0, v8, -15 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll @@ -9,7 +9,7 @@ define @intrinsic_vmsne_vv_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -61,7 +61,7 @@ define @intrinsic_vmsne_vv_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -113,7 +113,7 @@ define @intrinsic_vmsne_vv_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -165,7 +165,7 @@ define @intrinsic_vmsne_vv_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vmsne_vv_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ define @intrinsic_vmsne_vv_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -321,7 +321,7 @@ define @intrinsic_vmsne_vv_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -373,7 +373,7 @@ define @intrinsic_vmsne_vv_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -425,7 +425,7 @@ define @intrinsic_vmsne_vv_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -477,7 +477,7 @@ define @intrinsic_vmsne_vv_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i16_nxv8i16: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -529,7 +529,7 @@ define @intrinsic_vmsne_vv_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -581,7 +581,7 @@ define @intrinsic_vmsne_vv_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -633,7 +633,7 @@ define @intrinsic_vmsne_vv_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -685,7 +685,7 @@ define @intrinsic_vmsne_vv_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -737,7 +737,7 @@ define @intrinsic_vmsne_vv_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -789,7 +789,7 @@ define @intrinsic_vmsne_vv_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -841,7 +841,7 @@ define @intrinsic_vmsne_vv_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -893,7 +893,7 @@ define @intrinsic_vmsne_vv_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -945,7 +945,7 @@ define @intrinsic_vmsne_vx_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -992,7 +992,7 @@ define @intrinsic_vmsne_vx_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1039,7 +1039,7 @@ define @intrinsic_vmsne_vx_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmsne_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1086,7 +1086,7 @@ define @intrinsic_vmsne_vx_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1133,7 +1133,7 @@ define @intrinsic_vmsne_vx_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1180,7 +1180,7 @@ define @intrinsic_vmsne_vx_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1227,7 +1227,7 @@ define @intrinsic_vmsne_vx_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1274,7 +1274,7 @@ define @intrinsic_vmsne_vx_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1321,7 +1321,7 @@ define @intrinsic_vmsne_vx_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1368,7 +1368,7 @@ define @intrinsic_vmsne_vx_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1415,7 +1415,7 @@ define @intrinsic_vmsne_vx_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1462,7 +1462,7 @@ define @intrinsic_vmsne_vx_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1509,7 +1509,7 @@ define @intrinsic_vmsne_vx_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1556,7 +1556,7 @@ define @intrinsic_vmsne_vx_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmsne_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1603,7 +1603,7 @@ define @intrinsic_vmsne_vx_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1654,7 +1654,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vmsne.vv v0, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1713,7 +1713,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmsne.vv v0, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -1772,7 +1772,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vmsne.vv v0, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -1822,7 +1822,7 @@ define @intrinsic_vmsne_vi_nxv1i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1857,7 +1857,7 @@ define @intrinsic_vmsne_vi_nxv2i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1892,7 +1892,7 @@ define @intrinsic_vmsne_vi_nxv4i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1927,7 +1927,7 @@ define @intrinsic_vmsne_vi_nxv8i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1962,7 +1962,7 @@ define @intrinsic_vmsne_vi_nxv16i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1997,7 +1997,7 @@ define @intrinsic_vmsne_vi_nxv32i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2032,7 +2032,7 @@ define @intrinsic_vmsne_vi_nxv1i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: 
vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2067,7 +2067,7 @@ define @intrinsic_vmsne_vi_nxv2i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2102,7 +2102,7 @@ define @intrinsic_vmsne_vi_nxv4i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2137,7 +2137,7 @@ define @intrinsic_vmsne_vi_nxv8i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2172,7 +2172,7 @@ define @intrinsic_vmsne_vi_nxv16i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2207,7 +2207,7 @@ define @intrinsic_vmsne_vi_nxv1i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2242,7 +2242,7 @@ define @intrinsic_vmsne_vi_nxv2i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2277,7 +2277,7 @@ define @intrinsic_vmsne_vi_nxv4i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2312,7 +2312,7 @@ define @intrinsic_vmsne_vi_nxv8i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2347,7 +2347,7 @@ define @intrinsic_vmsne_vi_nxv1i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2382,7 +2382,7 @@ define @intrinsic_vmsne_vi_nxv2i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2417,7 +2417,7 @@ define @intrinsic_vmsne_vi_nxv4i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 9 ; CHECK-NEXT: 
ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll @@ -9,7 +9,7 @@ define @intrinsic_vmsne_vv_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -61,7 +61,7 @@ define @intrinsic_vmsne_vv_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -113,7 +113,7 @@ define @intrinsic_vmsne_vv_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -165,7 +165,7 @@ define @intrinsic_vmsne_vv_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vmsne_vv_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ define @intrinsic_vmsne_vv_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -321,7 +321,7 @@ define @intrinsic_vmsne_vv_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -373,7 +373,7 @@ define @intrinsic_vmsne_vv_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -425,7 +425,7 @@ define @intrinsic_vmsne_vv_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -477,7 +477,7 @@ define @intrinsic_vmsne_vv_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -529,7 +529,7 @@ define @intrinsic_vmsne_vv_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmsne_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -581,7 +581,7 @@ define @intrinsic_vmsne_vv_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -633,7 +633,7 @@ define @intrinsic_vmsne_vv_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -685,7 +685,7 @@ define @intrinsic_vmsne_vv_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -737,7 +737,7 @@ define @intrinsic_vmsne_vv_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -789,7 +789,7 @@ define @intrinsic_vmsne_vv_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v9 ; CHECK-NEXT: ret entry: @@ -841,7 +841,7 @@ define @intrinsic_vmsne_vv_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v10 ; CHECK-NEXT: ret entry: @@ -893,7 +893,7 @@ define @intrinsic_vmsne_vv_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmsne.vv v0, v8, v12 ; CHECK-NEXT: ret entry: @@ -945,7 +945,7 @@ define @intrinsic_vmsne_vx_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -992,7 +992,7 @@ define @intrinsic_vmsne_vx_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1039,7 +1039,7 @@ define @intrinsic_vmsne_vx_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1086,7 +1086,7 @@ define @intrinsic_vmsne_vx_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { 
; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1133,7 +1133,7 @@ define @intrinsic_vmsne_vx_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1180,7 +1180,7 @@ define @intrinsic_vmsne_vx_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1227,7 +1227,7 @@ define @intrinsic_vmsne_vx_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1274,7 +1274,7 @@ define @intrinsic_vmsne_vx_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1321,7 +1321,7 @@ define @intrinsic_vmsne_vx_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1368,7 +1368,7 @@ define @intrinsic_vmsne_vx_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1415,7 +1415,7 @@ define @intrinsic_vmsne_vx_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1462,7 +1462,7 @@ define @intrinsic_vmsne_vx_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1509,7 +1509,7 @@ define @intrinsic_vmsne_vx_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1556,7 +1556,7 @@ define @intrinsic_vmsne_vx_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1603,7 +1603,7 @@ define @intrinsic_vmsne_vx_nxv8i32_i32( %0, i32 %1, i64 %2) 
nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1650,7 +1650,7 @@ define @intrinsic_vmsne_vx_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1697,7 +1697,7 @@ define @intrinsic_vmsne_vx_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1744,7 +1744,7 @@ define @intrinsic_vmsne_vx_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vmsne.vx v0, v8, a0 ; CHECK-NEXT: ret entry: @@ -1786,7 +1786,7 @@ define @intrinsic_vmsne_vi_nxv1i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1821,7 +1821,7 @@ define @intrinsic_vmsne_vi_nxv2i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1856,7 +1856,7 @@ define @intrinsic_vmsne_vi_nxv4i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1891,7 +1891,7 @@ define @intrinsic_vmsne_vi_nxv8i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1926,7 +1926,7 @@ define @intrinsic_vmsne_vi_nxv16i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1961,7 +1961,7 @@ define @intrinsic_vmsne_vi_nxv32i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -1996,7 +1996,7 @@ define @intrinsic_vmsne_vi_nxv1i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2031,7 +2031,7 @@ define @intrinsic_vmsne_vi_nxv2i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i16_i16: ; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2066,7 +2066,7 @@ define @intrinsic_vmsne_vi_nxv4i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2101,7 +2101,7 @@ define @intrinsic_vmsne_vi_nxv8i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2136,7 +2136,7 @@ define @intrinsic_vmsne_vi_nxv16i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2171,7 +2171,7 @@ define @intrinsic_vmsne_vi_nxv1i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2206,7 +2206,7 @@ define @intrinsic_vmsne_vi_nxv2i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2241,7 +2241,7 @@ define @intrinsic_vmsne_vi_nxv4i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2276,7 +2276,7 @@ define @intrinsic_vmsne_vi_nxv8i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2311,7 +2311,7 @@ define @intrinsic_vmsne_vi_nxv1i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2346,7 +2346,7 @@ define @intrinsic_vmsne_vi_nxv2i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 9 ; CHECK-NEXT: ret entry: @@ -2381,7 +2381,7 @@ define @intrinsic_vmsne_vi_nxv4i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsof.ll b/llvm/test/CodeGen/RISCV/rvv/vmsof.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsof.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsof.ll @@ -10,7 +10,7 @@ define @intrinsic_vmsof_m_nxv1i1( %0, iXLen %1) 
nounwind { ; CHECK-LABEL: intrinsic_vmsof_m_nxv1i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmsof.m v8, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: ret @@ -52,7 +52,7 @@ define @intrinsic_vmsof_m_nxv2i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsof_m_nxv2i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmsof.m v8, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: ret @@ -94,7 +94,7 @@ define @intrinsic_vmsof_m_nxv4i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsof_m_nxv4i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmsof.m v8, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: ret @@ -136,7 +136,7 @@ define @intrinsic_vmsof_m_nxv8i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsof_m_nxv8i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmsof.m v8, v0 ; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: ret @@ -178,7 +178,7 @@ define @intrinsic_vmsof_m_nxv16i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsof_m_nxv16i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmsof.m v8, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: ret @@ -220,7 +220,7 @@ define @intrinsic_vmsof_m_nxv32i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsof_m_nxv32i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmsof.m v8, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: ret @@ -262,7 +262,7 @@ define @intrinsic_vmsof_m_nxv64i1( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vmsof_m_nxv64i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmsof.m v8, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmul-sdnode.ll @@ -8,7 +8,7 @@ define @vmul_vv_nxv1i8( %va, %vb) { ; CHECK-LABEL: vmul_vv_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = mul %va, %vb @@ -18,7 +18,7 @@ define @vmul_vx_nxv1i8( %va, i8 signext %b) { ; CHECK-LABEL: vmul_vx_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -31,7 +31,7 @@ ; CHECK-LABEL: vmul_vi_nxv1i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -7 -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 -7, i32 0 @@ -43,7 +43,7 @@ define @vmul_vv_nxv2i8( %va, %vb) { ; CHECK-LABEL: vmul_vv_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = mul %va, %vb @@ -53,7 +53,7 @@ define @vmul_vx_nxv2i8( %va, i8 
signext %b) { ; CHECK-LABEL: vmul_vx_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -66,7 +66,7 @@ ; CHECK-LABEL: vmul_vi_nxv2i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -7 -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 -7, i32 0 @@ -78,7 +78,7 @@ define @vmul_vv_nxv4i8( %va, %vb) { ; CHECK-LABEL: vmul_vv_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = mul %va, %vb @@ -88,7 +88,7 @@ define @vmul_vx_nxv4i8( %va, i8 signext %b) { ; CHECK-LABEL: vmul_vx_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -101,7 +101,7 @@ ; CHECK-LABEL: vmul_vi_nxv4i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -7 -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 -7, i32 0 @@ -113,7 +113,7 @@ define @vmul_vv_nxv8i8( %va, %vb) { ; CHECK-LABEL: vmul_vv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = mul %va, %vb @@ -123,7 +123,7 @@ define @vmul_vx_nxv8i8( %va, i8 signext %b) { ; CHECK-LABEL: vmul_vx_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -136,7 +136,7 @@ ; CHECK-LABEL: vmul_vi_nxv8i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -7 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 -7, i32 0 @@ -148,7 +148,7 @@ define @vmul_vv_nxv16i8( %va, %vb) { ; CHECK-LABEL: vmul_vv_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = mul %va, %vb @@ -158,7 +158,7 @@ define @vmul_vx_nxv16i8( %va, i8 signext %b) { ; CHECK-LABEL: vmul_vx_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -171,7 +171,7 @@ ; CHECK-LABEL: vmul_vi_nxv16i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -7 -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 -7, i32 0 @@ -183,7 +183,7 @@ define @vmul_vv_nxv32i8( %va, %vb) { ; CHECK-LABEL: vmul_vv_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = mul %va, %vb @@ -193,7 +193,7 @@ define @vmul_vx_nxv32i8( %va, i8 signext %b) { ; CHECK-LABEL: vmul_vx_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, 
e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -206,7 +206,7 @@ ; CHECK-LABEL: vmul_vi_nxv32i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -7 -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 -7, i32 0 @@ -218,7 +218,7 @@ define @vmul_vv_nxv64i8( %va, %vb) { ; CHECK-LABEL: vmul_vv_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = mul %va, %vb @@ -228,7 +228,7 @@ define @vmul_vx_nxv64i8( %va, i8 signext %b) { ; CHECK-LABEL: vmul_vx_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -241,7 +241,7 @@ ; CHECK-LABEL: vmul_vi_nxv64i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -7 -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 -7, i32 0 @@ -253,7 +253,7 @@ define @vmul_vv_nxv1i16( %va, %vb) { ; CHECK-LABEL: vmul_vv_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = mul %va, %vb @@ -263,7 +263,7 @@ define @vmul_vx_nxv1i16( %va, i16 signext %b) { ; CHECK-LABEL: vmul_vx_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -276,7 +276,7 @@ ; CHECK-LABEL: vmul_vi_nxv1i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -7 -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 -7, i32 0 @@ -288,7 +288,7 @@ define @vmul_vv_nxv2i16( %va, %vb) { ; CHECK-LABEL: vmul_vv_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = mul %va, %vb @@ -298,7 +298,7 @@ define @vmul_vx_nxv2i16( %va, i16 signext %b) { ; CHECK-LABEL: vmul_vx_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -311,7 +311,7 @@ ; CHECK-LABEL: vmul_vi_nxv2i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -7 -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 -7, i32 0 @@ -323,7 +323,7 @@ define @vmul_vv_nxv4i16( %va, %vb) { ; CHECK-LABEL: vmul_vv_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = mul %va, %vb @@ -333,7 +333,7 @@ define @vmul_vx_nxv4i16( %va, i16 signext %b) { ; CHECK-LABEL: vmul_vx_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, 
m1, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -346,7 +346,7 @@ ; CHECK-LABEL: vmul_vi_nxv4i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -7 -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 -7, i32 0 @@ -358,7 +358,7 @@ define @vmul_vv_nxv8i16( %va, %vb) { ; CHECK-LABEL: vmul_vv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = mul %va, %vb @@ -368,7 +368,7 @@ define @vmul_vx_nxv8i16( %va, i16 signext %b) { ; CHECK-LABEL: vmul_vx_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -381,7 +381,7 @@ ; CHECK-LABEL: vmul_vi_nxv8i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -7 -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 -7, i32 0 @@ -393,7 +393,7 @@ define @vmul_vv_nxv16i16( %va, %vb) { ; CHECK-LABEL: vmul_vv_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = mul %va, %vb @@ -403,7 +403,7 @@ define @vmul_vx_nxv16i16( %va, i16 signext %b) { ; CHECK-LABEL: vmul_vx_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -416,7 +416,7 @@ ; CHECK-LABEL: vmul_vi_nxv16i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -7 -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 -7, i32 0 @@ -428,7 +428,7 @@ define @vmul_vv_nxv32i16( %va, %vb) { ; CHECK-LABEL: vmul_vv_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = mul %va, %vb @@ -438,7 +438,7 @@ define @vmul_vx_nxv32i16( %va, i16 signext %b) { ; CHECK-LABEL: vmul_vx_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -451,7 +451,7 @@ ; CHECK-LABEL: vmul_vi_nxv32i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -7 -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 -7, i32 0 @@ -463,7 +463,7 @@ define @vmul_vv_nxv1i32( %va, %vb) { ; CHECK-LABEL: vmul_vv_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = mul %va, %vb @@ -473,7 +473,7 @@ define @vmul_vx_nxv1i32( %va, i32 signext %b) { ; CHECK-LABEL: vmul_vx_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmul.vx v8, 
v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -486,7 +486,7 @@ ; CHECK-LABEL: vmul_vi_nxv1i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -7 -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 -7, i32 0 @@ -498,7 +498,7 @@ define @vmul_vv_nxv2i32( %va, %vb) { ; CHECK-LABEL: vmul_vv_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = mul %va, %vb @@ -508,7 +508,7 @@ define @vmul_vx_nxv2i32( %va, i32 signext %b) { ; CHECK-LABEL: vmul_vx_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -521,7 +521,7 @@ ; CHECK-LABEL: vmul_vi_nxv2i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -7 -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 -7, i32 0 @@ -533,7 +533,7 @@ define @vmul_vv_nxv4i32( %va, %vb) { ; CHECK-LABEL: vmul_vv_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = mul %va, %vb @@ -543,7 +543,7 @@ define @vmul_vx_nxv4i32( %va, i32 signext %b) { ; CHECK-LABEL: vmul_vx_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -556,7 +556,7 @@ ; CHECK-LABEL: vmul_vi_nxv4i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -7 -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 -7, i32 0 @@ -568,7 +568,7 @@ define @vmul_vv_nxv8i32( %va, %vb) { ; CHECK-LABEL: vmul_vv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = mul %va, %vb @@ -578,7 +578,7 @@ define @vmul_vx_nxv8i32( %va, i32 signext %b) { ; CHECK-LABEL: vmul_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -591,7 +591,7 @@ ; CHECK-LABEL: vmul_vi_nxv8i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -7 -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 -7, i32 0 @@ -603,7 +603,7 @@ define @vmul_vv_nxv16i32( %va, %vb) { ; CHECK-LABEL: vmul_vv_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = mul %va, %vb @@ -613,7 +613,7 @@ define @vmul_vx_nxv16i32( %va, i32 signext %b) { ; CHECK-LABEL: vmul_vx_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = 
insertelement poison, i32 %b, i32 0 @@ -626,7 +626,7 @@ ; CHECK-LABEL: vmul_vi_nxv16i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -7 -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 -7, i32 0 @@ -638,7 +638,7 @@ define @vmul_vv_nxv1i64( %va, %vb) { ; CHECK-LABEL: vmul_vv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = mul %va, %vb @@ -653,7 +653,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vmul.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -661,7 +661,7 @@ ; ; RV64-LABEL: vmul_vx_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -674,7 +674,7 @@ ; CHECK-LABEL: vmul_vi_nxv1i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -7 -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 -7, i32 0 @@ -686,7 +686,7 @@ define @vmul_vi_nxv1i64_1( %va) { ; CHECK-LABEL: vmul_vi_nxv1i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i64 2, i32 0 @@ -698,7 +698,7 @@ define @vmul_vi_nxv1i64_2( %va) { ; CHECK-LABEL: vmul_vi_nxv1i64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 4 ; CHECK-NEXT: ret %head = insertelement poison, i64 16, i32 0 @@ -710,7 +710,7 @@ define @vmul_vv_nxv2i64( %va, %vb) { ; CHECK-LABEL: vmul_vv_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = mul %va, %vb @@ -725,7 +725,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vmul.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -733,7 +733,7 @@ ; ; RV64-LABEL: vmul_vx_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -746,7 +746,7 @@ ; CHECK-LABEL: vmul_vi_nxv2i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -7 -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 -7, i32 0 @@ -758,7 +758,7 @@ define @vmul_vi_nxv2i64_1( %va) { ; CHECK-LABEL: vmul_vi_nxv2i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i64 2, i32 0 @@ -770,7 +770,7 @@ define @vmul_vi_nxv2i64_2( %va) { ; CHECK-LABEL: vmul_vi_nxv2i64_2: ; 
CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 4 ; CHECK-NEXT: ret %head = insertelement poison, i64 16, i32 0 @@ -782,7 +782,7 @@ define @vmul_vv_nxv4i64( %va, %vb) { ; CHECK-LABEL: vmul_vv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = mul %va, %vb @@ -797,7 +797,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vmul.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -805,7 +805,7 @@ ; ; RV64-LABEL: vmul_vx_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -818,7 +818,7 @@ ; CHECK-LABEL: vmul_vi_nxv4i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -7 -; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 -7, i32 0 @@ -830,7 +830,7 @@ define @vmul_vi_nxv4i64_1( %va) { ; CHECK-LABEL: vmul_vi_nxv4i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i64 2, i32 0 @@ -842,7 +842,7 @@ define @vmul_vi_nxv4i64_2( %va) { ; CHECK-LABEL: vmul_vi_nxv4i64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 4 ; CHECK-NEXT: ret %head = insertelement poison, i64 16, i32 0 @@ -854,7 +854,7 @@ define @vmul_vv_nxv8i64( %va, %vb) { ; CHECK-LABEL: vmul_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = mul %va, %vb @@ -869,7 +869,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmul.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -877,7 +877,7 @@ ; ; RV64-LABEL: vmul_vx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -890,7 +890,7 @@ ; CHECK-LABEL: vmul_vi_nxv8i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -7 -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 -7, i32 0 @@ -902,7 +902,7 @@ define @vmul_vi_nxv8i64_1( %va) { ; CHECK-LABEL: vmul_vi_nxv8i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i64 2, i32 0 @@ -914,7 +914,7 @@ define @vmul_vi_nxv8i64_2( %va) { ; CHECK-LABEL: vmul_vi_nxv8i64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; 
CHECK-NEXT: vsll.vi v8, v8, 4 ; CHECK-NEXT: ret %head = insertelement poison, i64 16, i32 0 @@ -930,7 +930,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v8, (a0), zero ; RV32-NEXT: sw a3, 12(sp) ; RV32-NEXT: sw a2, 8(sp) @@ -941,7 +941,7 @@ ; ; RV64NOM-LABEL: vmul_xx_nxv8i64: ; RV64NOM: # %bb.0: -; RV64NOM-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64NOM-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64NOM-NEXT: vmv.v.x v8, a0 ; RV64NOM-NEXT: vmul.vx v8, v8, a1 ; RV64NOM-NEXT: ret @@ -949,7 +949,7 @@ ; RV64M-LABEL: vmul_xx_nxv8i64: ; RV64M: # %bb.0: ; RV64M-NEXT: mul a0, a0, a1 -; RV64M-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64M-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64M-NEXT: vmv.v.x v8, a0 ; RV64M-NEXT: ret %head1 = insertelement poison, i64 %a, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll @@ -33,7 +33,7 @@ define @vmul_vv_nxv1i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv1i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -57,7 +57,7 @@ define @vmul_vx_nxv1i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_nxv1i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -83,7 +83,7 @@ define @vmul_vv_nxv2i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -107,7 +107,7 @@ define @vmul_vx_nxv2i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -133,7 +133,7 @@ define @vmul_vv_nxv4i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -157,7 +157,7 @@ define @vmul_vx_nxv4i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_nxv4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -183,7 +183,7 @@ define @vmul_vv_nxv8i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -207,7 +207,7 @@ define @vmul_vx_nxv8i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; 
CHECK-LABEL: vmul_vx_nxv8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -233,7 +233,7 @@ define @vmul_vv_nxv16i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -257,7 +257,7 @@ define @vmul_vx_nxv16i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_nxv16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -283,7 +283,7 @@ define @vmul_vv_nxv32i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv32i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -307,7 +307,7 @@ define @vmul_vx_nxv32i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_nxv32i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -333,7 +333,7 @@ define @vmul_vv_nxv64i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv64i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -357,7 +357,7 @@ define @vmul_vx_nxv64i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_nxv64i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -383,7 +383,7 @@ define @vmul_vv_nxv1i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv1i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -407,7 +407,7 @@ define @vmul_vx_nxv1i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_nxv1i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -433,7 +433,7 @@ define @vmul_vv_nxv2i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -457,7 +457,7 @@ define @vmul_vx_nxv2i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: 
vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -483,7 +483,7 @@ define @vmul_vv_nxv4i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -507,7 +507,7 @@ define @vmul_vx_nxv4i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_nxv4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -533,7 +533,7 @@ define @vmul_vv_nxv8i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -557,7 +557,7 @@ define @vmul_vx_nxv8i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_nxv8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -583,7 +583,7 @@ define @vmul_vv_nxv16i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -607,7 +607,7 @@ define @vmul_vx_nxv16i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_nxv16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -633,7 +633,7 @@ define @vmul_vv_nxv32i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv32i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -657,7 +657,7 @@ define @vmul_vx_nxv32i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_nxv32i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -683,7 +683,7 @@ define @vmul_vv_nxv1i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv1i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -707,7 +707,7 @@ define @vmul_vx_nxv1i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_nxv1i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -733,7 +733,7 @@ define @vmul_vv_nxv2i32_unmasked( %va, 
%b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -757,7 +757,7 @@ define @vmul_vx_nxv2i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -783,7 +783,7 @@ define @vmul_vv_nxv4i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -807,7 +807,7 @@ define @vmul_vx_nxv4i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_nxv4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -833,7 +833,7 @@ define @vmul_vv_nxv7i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv7i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -857,7 +857,7 @@ define @vmul_vx_nxv7i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_nxv7i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -883,7 +883,7 @@ define @vmul_vv_nxv8i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -907,7 +907,7 @@ define @vmul_vx_nxv8i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_nxv8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -933,7 +933,7 @@ define @vmul_vv_nxv16i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -969,7 +969,7 @@ define @vmul_vx_nxv16i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vx_nxv16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -995,7 +995,7 @@ define @vmul_vv_nxv1i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv1i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: 
vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1012,7 +1012,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v9, v0.t @@ -1038,16 +1038,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vmul.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vmul_vx_nxv1i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1073,7 +1073,7 @@ define @vmul_vv_nxv2i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1090,7 +1090,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v10, v0.t @@ -1116,16 +1116,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vmul.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vmul_vx_nxv2i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1151,7 +1151,7 @@ define @vmul_vv_nxv4i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1168,7 +1168,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v12, v0.t @@ -1194,16 +1194,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vmul.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: 
vmul_vx_nxv4i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1229,7 +1229,7 @@ define @vmul_vv_nxv8i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vmul_vv_nxv8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1246,7 +1246,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vmul.vv v8, v8, v16, v0.t @@ -1272,16 +1272,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vmul.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vmul_vx_nxv8i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul.ll b/llvm/test/CodeGen/RISCV/rvv/vmul.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmul.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmul.ll @@ -12,7 +12,7 @@ define @intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define @intrinsic_vmul_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vmul_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -150,7 +150,7 @@ define @intrinsic_vmul_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -196,7 +196,7 @@ define @intrinsic_vmul_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -242,7 +242,7 @@ define @intrinsic_vmul_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmul_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -288,7 +288,7 @@ define @intrinsic_vmul_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -335,7 +335,7 @@ define @intrinsic_vmul_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -381,7 +381,7 @@ define @intrinsic_vmul_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -427,7 +427,7 @@ define @intrinsic_vmul_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -473,7 +473,7 @@ define @intrinsic_vmul_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -519,7 +519,7 @@ define @intrinsic_vmul_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -565,7 +565,7 @@ define @intrinsic_vmul_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -612,7 +612,7 @@ define @intrinsic_vmul_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -658,7 +658,7 @@ define @intrinsic_vmul_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -704,7 +704,7 @@ define @intrinsic_vmul_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; 
CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -750,7 +750,7 @@ define @intrinsic_vmul_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -796,7 +796,7 @@ define @intrinsic_vmul_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -843,7 +843,7 @@ define @intrinsic_vmul_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -889,7 +889,7 @@ define @intrinsic_vmul_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -935,7 +935,7 @@ define @intrinsic_vmul_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -981,7 +981,7 @@ define @intrinsic_vmul_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmul.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1028,7 +1028,7 @@ define @intrinsic_vmul_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1074,7 +1074,7 @@ define @intrinsic_vmul_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1120,7 +1120,7 @@ define @intrinsic_vmul_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1166,7 +1166,7 @@ define @intrinsic_vmul_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1212,7 +1212,7 @@ define 
@intrinsic_vmul_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1258,7 +1258,7 @@ define @intrinsic_vmul_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1304,7 +1304,7 @@ define @intrinsic_vmul_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1350,7 +1350,7 @@ define @intrinsic_vmul_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1396,7 +1396,7 @@ define @intrinsic_vmul_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1442,7 +1442,7 @@ define @intrinsic_vmul_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1488,7 +1488,7 @@ define @intrinsic_vmul_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1534,7 +1534,7 @@ define @intrinsic_vmul_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1580,7 +1580,7 @@ define @intrinsic_vmul_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1626,7 +1626,7 @@ define @intrinsic_vmul_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1672,7 +1672,7 @@ define @intrinsic_vmul_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1718,7 +1718,7 @@ define @intrinsic_vmul_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1764,7 +1764,7 @@ define @intrinsic_vmul_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1810,7 +1810,7 @@ define @intrinsic_vmul_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmul_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1860,7 +1860,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vmul.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -1868,7 +1868,7 @@ ; ; RV64-LABEL: intrinsic_vmul_vx_nxv1i64_nxv1i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: ret entry: @@ -1930,7 +1930,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vmul.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -1938,7 +1938,7 @@ ; ; RV64-LABEL: intrinsic_vmul_vx_nxv2i64_nxv2i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: ret entry: @@ -2000,7 +2000,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vmul.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -2008,7 +2008,7 @@ ; ; RV64-LABEL: intrinsic_vmul_vx_nxv4i64_nxv4i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: ret entry: @@ -2070,7 +2070,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmul.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -2078,7 +2078,7 @@ ; ; RV64-LABEL: intrinsic_vmul_vx_nxv8i64_nxv8i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vmul.vx v8, v8, a0 ; RV64-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll @@ -8,7 +8,7 @@ ; CHECK-LABEL: srem_eq_fold_nxv4i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 42 -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: li a1, -85 ; CHECK-NEXT: vmacc.vx v9, a1, v8 @@ -28,7 +28,7 @@ define @vmulh_vv_nxv1i32( %va, %vb) { ; CHECK-LABEL: vmulh_vv_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmulh.vv v8, v9, v8 ; CHECK-NEXT: ret %vc = sext %vb to @@ -44,7 +44,7 @@ define @vmulh_vx_nxv1i32( %va, i32 %x) { ; CHECK-LABEL: vmulh_vx_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmulh.vx v8, v8, a0 ; CHECK-NEXT: ret %head1 = insertelement poison, i32 %x, i32 0 @@ -63,7 +63,7 @@ ; RV32-LABEL: vmulh_vi_nxv1i32_0: ; RV32: # %bb.0: ; RV32-NEXT: li a0, -7 -; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; RV32-NEXT: vmulh.vx v8, v8, a0 ; RV32-NEXT: ret ; @@ -72,7 +72,7 @@ ; RV64-NEXT: li a0, 1 ; RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: addi a0, a0, -7 -; RV64-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; RV64-NEXT: vmulh.vx v8, v8, a0 ; RV64-NEXT: ret %head1 = insertelement poison, i32 -7, i32 0 @@ -91,7 +91,7 @@ ; CHECK-LABEL: vmulh_vi_nxv1i32_1: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmulh.vx v8, v8, a0 ; CHECK-NEXT: ret %head1 = insertelement poison, i32 16, i32 0 @@ -109,7 +109,7 @@ define @vmulh_vv_nxv2i32( %va, %vb) { ; CHECK-LABEL: vmulh_vv_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vmulh.vv v8, v9, v8 ; CHECK-NEXT: ret %vc = sext %vb to @@ -125,7 +125,7 @@ define @vmulh_vx_nxv2i32( %va, i32 %x) { ; CHECK-LABEL: vmulh_vx_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vmulh.vx v8, v8, a0 ; CHECK-NEXT: ret %head1 = insertelement poison, i32 %x, i32 0 @@ -144,7 +144,7 @@ ; RV32-LABEL: vmulh_vi_nxv2i32_0: ; RV32: # %bb.0: ; RV32-NEXT: li a0, -7 -; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; RV32-NEXT: vmulh.vx v8, v8, a0 ; RV32-NEXT: ret ; @@ -153,7 +153,7 @@ ; RV64-NEXT: li a0, 1 ; RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: addi a0, a0, -7 -; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; RV64-NEXT: vmulh.vx v8, v8, a0 ; RV64-NEXT: ret %head1 = insertelement poison, i32 -7, i32 0 @@ -172,7 +172,7 @@ ; CHECK-LABEL: vmulh_vi_nxv2i32_1: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vmulh.vx v8, v8, a0 ; CHECK-NEXT: ret %head1 = insertelement poison, i32 16, i32 0 @@ -190,7 +190,7 @@ define @vmulh_vv_nxv4i32( %va, %vb) { ; CHECK-LABEL: vmulh_vv_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vmulh.vv v8, v10, v8 ; CHECK-NEXT: ret %vc = sext %vb to @@ -206,7 +206,7 @@ define @vmulh_vx_nxv4i32( %va, i32 %x) { ; CHECK-LABEL: vmulh_vx_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: 
vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vmulh.vx v8, v8, a0 ; CHECK-NEXT: ret %head1 = insertelement poison, i32 %x, i32 0 @@ -225,7 +225,7 @@ ; RV32-LABEL: vmulh_vi_nxv4i32_0: ; RV32: # %bb.0: ; RV32-NEXT: li a0, -7 -; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; RV32-NEXT: vmulh.vx v8, v8, a0 ; RV32-NEXT: ret ; @@ -234,7 +234,7 @@ ; RV64-NEXT: li a0, 1 ; RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: addi a0, a0, -7 -; RV64-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; RV64-NEXT: vmulh.vx v8, v8, a0 ; RV64-NEXT: ret %head1 = insertelement poison, i32 -7, i32 0 @@ -253,7 +253,7 @@ ; CHECK-LABEL: vmulh_vi_nxv4i32_1: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vmulh.vx v8, v8, a0 ; CHECK-NEXT: ret %head1 = insertelement poison, i32 16, i32 0 @@ -271,7 +271,7 @@ define @vmulh_vv_nxv8i32( %va, %vb) { ; CHECK-LABEL: vmulh_vv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmulh.vv v8, v12, v8 ; CHECK-NEXT: ret %vc = sext %vb to @@ -287,7 +287,7 @@ define @vmulh_vx_nxv8i32( %va, i32 %x) { ; CHECK-LABEL: vmulh_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vmulh.vx v8, v8, a0 ; CHECK-NEXT: ret %head1 = insertelement poison, i32 %x, i32 0 @@ -306,7 +306,7 @@ ; RV32-LABEL: vmulh_vi_nxv8i32_0: ; RV32: # %bb.0: ; RV32-NEXT: li a0, -7 -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vmulh.vx v8, v8, a0 ; RV32-NEXT: ret ; @@ -315,7 +315,7 @@ ; RV64-NEXT: li a0, 1 ; RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: addi a0, a0, -7 -; RV64-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV64-NEXT: vmulh.vx v8, v8, a0 ; RV64-NEXT: ret %head1 = insertelement poison, i32 -7, i32 0 @@ -334,7 +334,7 @@ ; CHECK-LABEL: vmulh_vi_nxv8i32_1: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vmulh.vx v8, v8, a0 ; CHECK-NEXT: ret %head1 = insertelement poison, i32 16, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulh.ll b/llvm/test/CodeGen/RISCV/rvv/vmulh.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmulh.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulh.ll @@ -12,7 +12,7 @@ define @intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmulh.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define @intrinsic_vmulh_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmulh.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vmulh_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: 
vmulh.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -150,7 +150,7 @@ define @intrinsic_vmulh_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmulh.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -196,7 +196,7 @@ define @intrinsic_vmulh_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmulh.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -242,7 +242,7 @@ define @intrinsic_vmulh_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmulh.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -288,7 +288,7 @@ define @intrinsic_vmulh_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmulh.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -335,7 +335,7 @@ define @intrinsic_vmulh_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmulh.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -381,7 +381,7 @@ define @intrinsic_vmulh_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmulh.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -427,7 +427,7 @@ define @intrinsic_vmulh_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmulh.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -473,7 +473,7 @@ define @intrinsic_vmulh_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmulh.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -519,7 +519,7 @@ define @intrinsic_vmulh_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmulh.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -565,7 +565,7 @@ define @intrinsic_vmulh_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmulh.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -612,7 +612,7 @@ define 
@intrinsic_vmulh_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmulh.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -658,7 +658,7 @@ define @intrinsic_vmulh_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmulh.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -704,7 +704,7 @@ define @intrinsic_vmulh_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmulh.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -750,7 +750,7 @@ define @intrinsic_vmulh_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmulh.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -796,7 +796,7 @@ define @intrinsic_vmulh_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmulh.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -843,7 +843,7 @@ define @intrinsic_vmulh_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmulh.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -889,7 +889,7 @@ define @intrinsic_vmulh_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmulh.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -935,7 +935,7 @@ define @intrinsic_vmulh_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmulh.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -981,7 +981,7 @@ define @intrinsic_vmulh_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmulh.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1028,7 +1028,7 @@ define @intrinsic_vmulh_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulh_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmulh.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1074,7 +1074,7 @@ define @intrinsic_vmulh_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmulh_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
@@ -1120,7 +1120,7 @@
define @intrinsic_vmulh_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
@@ -1166,7 +1166,7 @@
define @intrinsic_vmulh_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
@@ -1212,7 +1212,7 @@
define @intrinsic_vmulh_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
@@ -1258,7 +1258,7 @@
define @intrinsic_vmulh_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
@@ -1304,7 +1304,7 @@
define @intrinsic_vmulh_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
@@ -1350,7 +1350,7 @@
define @intrinsic_vmulh_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
@@ -1396,7 +1396,7 @@
define @intrinsic_vmulh_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
@@ -1442,7 +1442,7 @@
define @intrinsic_vmulh_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
@@ -1488,7 +1488,7 @@
define @intrinsic_vmulh_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
@@ -1534,7 +1534,7 @@
define @intrinsic_vmulh_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
@@ -1580,7 +1580,7 @@
define @intrinsic_vmulh_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
@@ -1626,7 +1626,7 @@
define @intrinsic_vmulh_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
@@ -1672,7 +1672,7 @@
define @intrinsic_vmulh_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
@@ -1718,7 +1718,7 @@
define @intrinsic_vmulh_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
@@ -1764,7 +1764,7 @@
define @intrinsic_vmulh_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
@@ -1810,7 +1810,7 @@
define @intrinsic_vmulh_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmulh_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
@@ -1860,7 +1860,7 @@
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmulh.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
@@ -1868,7 +1868,7 @@
;
; RV64-LABEL: intrinsic_vmulh_vx_nxv1i64_nxv1i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vmulh.vx v8, v8, a0
; RV64-NEXT: ret
entry:
@@ -1930,7 +1930,7 @@
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vmulh.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
@@ -1938,7 +1938,7 @@
;
; RV64-LABEL: intrinsic_vmulh_vx_nxv2i64_nxv2i64_i64:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vmulh.vx v8, v8, a0
; RV64-NEXT: ret
entry:
@@ -2000,7 +2000,7 @@
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vmulh.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
@@ -2008,7
+2008,7 @@ ; ; RV64-LABEL: intrinsic_vmulh_vx_nxv4i64_nxv4i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vmulh.vx v8, v8, a0 ; RV64-NEXT: ret entry: @@ -2070,7 +2070,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmulh.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -2078,7 +2078,7 @@ ; ; RV64-LABEL: intrinsic_vmulh_vx_nxv8i64_nxv8i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vmulh.vx v8, v8, a0 ; RV64-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhsu.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhsu.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmulhsu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulhsu.ll @@ -12,7 +12,7 @@ define @intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmulhsu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define @intrinsic_vmulhsu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmulhsu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vmulhsu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmulhsu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -150,7 +150,7 @@ define @intrinsic_vmulhsu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmulhsu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -196,7 +196,7 @@ define @intrinsic_vmulhsu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmulhsu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -242,7 +242,7 @@ define @intrinsic_vmulhsu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmulhsu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -288,7 +288,7 @@ define @intrinsic_vmulhsu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmulhsu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -335,7 +335,7 @@ define @intrinsic_vmulhsu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmulhsu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmulhsu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -381,7 +381,7 @@ define @intrinsic_vmulhsu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmulhsu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -427,7 +427,7 @@ define @intrinsic_vmulhsu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmulhsu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -473,7 +473,7 @@ define @intrinsic_vmulhsu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmulhsu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -519,7 +519,7 @@ define @intrinsic_vmulhsu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmulhsu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -565,7 +565,7 @@ define @intrinsic_vmulhsu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmulhsu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -612,7 +612,7 @@ define @intrinsic_vmulhsu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmulhsu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -658,7 +658,7 @@ define @intrinsic_vmulhsu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmulhsu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -704,7 +704,7 @@ define @intrinsic_vmulhsu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmulhsu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -750,7 +750,7 @@ define @intrinsic_vmulhsu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmulhsu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -796,7 +796,7 @@ define @intrinsic_vmulhsu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmulhsu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmulhsu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -843,7 +843,7 @@ define @intrinsic_vmulhsu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmulhsu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -889,7 +889,7 @@ define @intrinsic_vmulhsu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmulhsu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -935,7 +935,7 @@ define @intrinsic_vmulhsu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmulhsu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -981,7 +981,7 @@ define @intrinsic_vmulhsu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmulhsu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1028,7 +1028,7 @@ define @intrinsic_vmulhsu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmulhsu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1074,7 +1074,7 @@ define @intrinsic_vmulhsu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmulhsu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1120,7 +1120,7 @@ define @intrinsic_vmulhsu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmulhsu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1166,7 +1166,7 @@ define @intrinsic_vmulhsu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmulhsu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1212,7 +1212,7 @@ define @intrinsic_vmulhsu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmulhsu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1258,7 +1258,7 @@ define @intrinsic_vmulhsu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmulhsu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1304,7 +1304,7 @@ define @intrinsic_vmulhsu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmulhsu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1350,7 +1350,7 @@ define @intrinsic_vmulhsu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmulhsu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1396,7 +1396,7 @@ define @intrinsic_vmulhsu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmulhsu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1442,7 +1442,7 @@ define @intrinsic_vmulhsu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmulhsu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1488,7 +1488,7 @@ define @intrinsic_vmulhsu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmulhsu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1534,7 +1534,7 @@ define @intrinsic_vmulhsu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmulhsu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1580,7 +1580,7 @@ define @intrinsic_vmulhsu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vmulhsu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1626,7 +1626,7 @@ define @intrinsic_vmulhsu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmulhsu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1672,7 +1672,7 @@ define @intrinsic_vmulhsu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmulhsu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1718,7 +1718,7 @@ define @intrinsic_vmulhsu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, 
ta, ma ; CHECK-NEXT: vmulhsu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1764,7 +1764,7 @@ define @intrinsic_vmulhsu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmulhsu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1810,7 +1810,7 @@ define @intrinsic_vmulhsu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vmulhsu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1860,7 +1860,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vmulhsu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -1868,7 +1868,7 @@ ; ; RV64-LABEL: intrinsic_vmulhsu_vx_nxv1i64_nxv1i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vmulhsu.vx v8, v8, a0 ; RV64-NEXT: ret entry: @@ -1930,7 +1930,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vmulhsu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -1938,7 +1938,7 @@ ; ; RV64-LABEL: intrinsic_vmulhsu_vx_nxv2i64_nxv2i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vmulhsu.vx v8, v8, a0 ; RV64-NEXT: ret entry: @@ -2000,7 +2000,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vmulhsu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -2008,7 +2008,7 @@ ; ; RV64-LABEL: intrinsic_vmulhsu_vx_nxv4i64_nxv4i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vmulhsu.vx v8, v8, a0 ; RV64-NEXT: ret entry: @@ -2070,7 +2070,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmulhsu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -2078,7 +2078,7 @@ ; ; RV64-LABEL: intrinsic_vmulhsu_vx_nxv8i64_nxv8i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vmulhsu.vx v8, v8, a0 ; RV64-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhu-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmulhu-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulhu-sdnode.ll @@ -5,7 +5,7 @@ define @vmulhu_vv_nxv1i32( %va, %vb) { ; CHECK-LABEL: vmulhu_vv_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmulhu.vv v8, v9, v8 ; CHECK-NEXT: ret %vc = zext %vb to @@ -21,7 +21,7 @@ define @vmulhu_vx_nxv1i32( %va, 
i32 %x) { ; CHECK-LABEL: vmulhu_vx_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: ret %head1 = insertelement poison, i32 %x, i32 0 @@ -40,7 +40,7 @@ ; RV32-LABEL: vmulhu_vi_nxv1i32_0: ; RV32: # %bb.0: ; RV32-NEXT: li a0, -7 -; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; RV32-NEXT: vmulhu.vx v8, v8, a0 ; RV32-NEXT: ret ; @@ -49,7 +49,7 @@ ; RV64-NEXT: li a0, 1 ; RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: addi a0, a0, -7 -; RV64-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: ret %head1 = insertelement poison, i32 -7, i32 0 @@ -67,14 +67,14 @@ define @vmulhu_vi_nxv1i32_1( %va) { ; RV32-LABEL: vmulhu_vi_nxv1i32_1: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; RV32-NEXT: vsrl.vi v8, v8, 28 ; RV32-NEXT: ret ; ; RV64-LABEL: vmulhu_vi_nxv1i32_1: ; RV64: # %bb.0: ; RV64-NEXT: li a0, 16 -; RV64-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: ret %head1 = insertelement poison, i32 16, i32 0 @@ -92,7 +92,7 @@ define @vmulhu_vv_nxv2i32( %va, %vb) { ; CHECK-LABEL: vmulhu_vv_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vmulhu.vv v8, v9, v8 ; CHECK-NEXT: ret %vc = zext %vb to @@ -108,7 +108,7 @@ define @vmulhu_vx_nxv2i32( %va, i32 %x) { ; CHECK-LABEL: vmulhu_vx_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: ret %head1 = insertelement poison, i32 %x, i32 0 @@ -127,7 +127,7 @@ ; RV32-LABEL: vmulhu_vi_nxv2i32_0: ; RV32: # %bb.0: ; RV32-NEXT: li a0, -7 -; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; RV32-NEXT: vmulhu.vx v8, v8, a0 ; RV32-NEXT: ret ; @@ -136,7 +136,7 @@ ; RV64-NEXT: li a0, 1 ; RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: addi a0, a0, -7 -; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: ret %head1 = insertelement poison, i32 -7, i32 0 @@ -154,14 +154,14 @@ define @vmulhu_vi_nxv2i32_1( %va) { ; RV32-LABEL: vmulhu_vi_nxv2i32_1: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV32-NEXT: vsrl.vi v8, v8, 28 ; RV32-NEXT: ret ; ; RV64-LABEL: vmulhu_vi_nxv2i32_1: ; RV64: # %bb.0: ; RV64-NEXT: li a0, 16 -; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: ret %head1 = insertelement poison, i32 16, i32 0 @@ -179,7 +179,7 @@ define @vmulhu_vv_nxv4i32( %va, %vb) { ; CHECK-LABEL: vmulhu_vv_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vmulhu.vv v8, v10, v8 ; CHECK-NEXT: ret %vc = zext %vb to @@ -195,7 +195,7 @@ define @vmulhu_vx_nxv4i32( %va, i32 %x) { ; CHECK-LABEL: vmulhu_vx_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: ret %head1 = insertelement poison, i32 %x, 
i32 0 @@ -214,7 +214,7 @@ ; RV32-LABEL: vmulhu_vi_nxv4i32_0: ; RV32: # %bb.0: ; RV32-NEXT: li a0, -7 -; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; RV32-NEXT: vmulhu.vx v8, v8, a0 ; RV32-NEXT: ret ; @@ -223,7 +223,7 @@ ; RV64-NEXT: li a0, 1 ; RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: addi a0, a0, -7 -; RV64-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: ret %head1 = insertelement poison, i32 -7, i32 0 @@ -241,14 +241,14 @@ define @vmulhu_vi_nxv4i32_1( %va) { ; RV32-LABEL: vmulhu_vi_nxv4i32_1: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; RV32-NEXT: vsrl.vi v8, v8, 28 ; RV32-NEXT: ret ; ; RV64-LABEL: vmulhu_vi_nxv4i32_1: ; RV64: # %bb.0: ; RV64-NEXT: li a0, 16 -; RV64-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: ret %head1 = insertelement poison, i32 16, i32 0 @@ -266,7 +266,7 @@ define @vmulhu_vv_nxv8i32( %va, %vb) { ; CHECK-LABEL: vmulhu_vv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmulhu.vv v8, v12, v8 ; CHECK-NEXT: ret %vc = zext %vb to @@ -282,7 +282,7 @@ define @vmulhu_vx_nxv8i32( %va, i32 %x) { ; CHECK-LABEL: vmulhu_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: ret %head1 = insertelement poison, i32 %x, i32 0 @@ -301,7 +301,7 @@ ; RV32-LABEL: vmulhu_vi_nxv8i32_0: ; RV32: # %bb.0: ; RV32-NEXT: li a0, -7 -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vmulhu.vx v8, v8, a0 ; RV32-NEXT: ret ; @@ -310,7 +310,7 @@ ; RV64-NEXT: li a0, 1 ; RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: addi a0, a0, -7 -; RV64-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: ret %head1 = insertelement poison, i32 -7, i32 0 @@ -328,14 +328,14 @@ define @vmulhu_vi_nxv8i32_1( %va) { ; RV32-LABEL: vmulhu_vi_nxv8i32_1: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; RV32-NEXT: vsrl.vi v8, v8, 28 ; RV32-NEXT: ret ; ; RV64-LABEL: vmulhu_vi_nxv8i32_1: ; RV64: # %bb.0: ; RV64-NEXT: li a0, 16 -; RV64-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: ret %head1 = insertelement poison, i32 16, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhu.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhu.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmulhu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulhu.ll @@ -12,7 +12,7 @@ define @intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmulhu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define @intrinsic_vmulhu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmulhu.vv v8, v8, v9 ; 
CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vmulhu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmulhu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -150,7 +150,7 @@ define @intrinsic_vmulhu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmulhu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -196,7 +196,7 @@ define @intrinsic_vmulhu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmulhu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -242,7 +242,7 @@ define @intrinsic_vmulhu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmulhu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -288,7 +288,7 @@ define @intrinsic_vmulhu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmulhu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -335,7 +335,7 @@ define @intrinsic_vmulhu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmulhu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -381,7 +381,7 @@ define @intrinsic_vmulhu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmulhu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -427,7 +427,7 @@ define @intrinsic_vmulhu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmulhu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -473,7 +473,7 @@ define @intrinsic_vmulhu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmulhu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -519,7 +519,7 @@ define @intrinsic_vmulhu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmulhu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -565,7 +565,7 @@ define 
@intrinsic_vmulhu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmulhu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -612,7 +612,7 @@ define @intrinsic_vmulhu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmulhu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -658,7 +658,7 @@ define @intrinsic_vmulhu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmulhu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -704,7 +704,7 @@ define @intrinsic_vmulhu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmulhu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -750,7 +750,7 @@ define @intrinsic_vmulhu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmulhu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -796,7 +796,7 @@ define @intrinsic_vmulhu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmulhu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -843,7 +843,7 @@ define @intrinsic_vmulhu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmulhu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -889,7 +889,7 @@ define @intrinsic_vmulhu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmulhu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -935,7 +935,7 @@ define @intrinsic_vmulhu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmulhu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -981,7 +981,7 @@ define @intrinsic_vmulhu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmulhu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1028,7 +1028,7 @@ define @intrinsic_vmulhu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) 
nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1074,7 +1074,7 @@ define @intrinsic_vmulhu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1120,7 +1120,7 @@ define @intrinsic_vmulhu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1166,7 +1166,7 @@ define @intrinsic_vmulhu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1212,7 +1212,7 @@ define @intrinsic_vmulhu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1258,7 +1258,7 @@ define @intrinsic_vmulhu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1304,7 +1304,7 @@ define @intrinsic_vmulhu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1350,7 +1350,7 @@ define @intrinsic_vmulhu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1396,7 +1396,7 @@ define @intrinsic_vmulhu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1442,7 +1442,7 @@ define @intrinsic_vmulhu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1488,7 +1488,7 @@ define @intrinsic_vmulhu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, 
mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1534,7 +1534,7 @@ define @intrinsic_vmulhu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1580,7 +1580,7 @@ define @intrinsic_vmulhu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1626,7 +1626,7 @@ define @intrinsic_vmulhu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1672,7 +1672,7 @@ define @intrinsic_vmulhu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1718,7 +1718,7 @@ define @intrinsic_vmulhu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1764,7 +1764,7 @@ define @intrinsic_vmulhu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1810,7 +1810,7 @@ define @intrinsic_vmulhu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vmulhu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1860,7 +1860,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vmulhu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -1868,7 +1868,7 @@ ; ; RV64-LABEL: intrinsic_vmulhu_vx_nxv1i64_nxv1i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: ret entry: @@ -1930,7 +1930,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vmulhu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -1938,7 +1938,7 @@ ; ; RV64-LABEL: intrinsic_vmulhu_vx_nxv2i64_nxv2i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu 
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: ret entry: @@ -2000,7 +2000,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vmulhu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -2008,7 +2008,7 @@ ; ; RV64-LABEL: intrinsic_vmulhu_vx_nxv4i64_nxv4i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: ret entry: @@ -2070,7 +2070,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vmulhu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -2078,7 +2078,7 @@ ; ; RV64-LABEL: intrinsic_vmulhu_vx_nxv8i64_nxv8i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vmulhu.vx v8, v8, a0 ; RV64-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv32.ll @@ -6,7 +6,7 @@ define @intrinsic_vmv.s.x_x_nxv1i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -19,7 +19,7 @@ define @intrinsic_vmv.s.x_x_nxv2i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -32,7 +32,7 @@ define @intrinsic_vmv.s.x_x_nxv4i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -45,7 +45,7 @@ define @intrinsic_vmv.s.x_x_nxv8i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define @intrinsic_vmv.s.x_x_nxv16i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -71,7 +71,7 @@ define @intrinsic_vmv.s.x_x_nxv32i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -84,7 +84,7 @@ define @intrinsic_vmv.s.x_x_nxv64i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; 
CHECK-NEXT: ret entry: @@ -97,7 +97,7 @@ define @intrinsic_vmv.s.x_x_nxv1i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -110,7 +110,7 @@ define @intrinsic_vmv.s.x_x_nxv2i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -123,7 +123,7 @@ define @intrinsic_vmv.s.x_x_nxv4i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -136,7 +136,7 @@ define @intrinsic_vmv.s.x_x_nxv8i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -149,7 +149,7 @@ define @intrinsic_vmv.s.x_x_nxv16i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -162,7 +162,7 @@ define @intrinsic_vmv.s.x_x_nxv32i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -175,7 +175,7 @@ define @intrinsic_vmv.s.x_x_nxv1i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -188,7 +188,7 @@ define @intrinsic_vmv.s.x_x_nxv2i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -201,7 +201,7 @@ define @intrinsic_vmv.s.x_x_nxv4i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -214,7 +214,7 @@ define @intrinsic_vmv.s.x_x_nxv8i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -227,7 +227,7 @@ define @intrinsic_vmv.s.x_x_nxv16i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv64.ll 
b/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv64.ll @@ -6,7 +6,7 @@ define @intrinsic_vmv.s.x_x_nxv1i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -19,7 +19,7 @@ define @intrinsic_vmv.s.x_x_nxv2i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -32,7 +32,7 @@ define @intrinsic_vmv.s.x_x_nxv4i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -45,7 +45,7 @@ define @intrinsic_vmv.s.x_x_nxv8i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define @intrinsic_vmv.s.x_x_nxv16i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -71,7 +71,7 @@ define @intrinsic_vmv.s.x_x_nxv32i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -84,7 +84,7 @@ define @intrinsic_vmv.s.x_x_nxv64i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -97,7 +97,7 @@ define @intrinsic_vmv.s.x_x_nxv1i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -110,7 +110,7 @@ define @intrinsic_vmv.s.x_x_nxv2i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -123,7 +123,7 @@ define @intrinsic_vmv.s.x_x_nxv4i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -136,7 +136,7 @@ define @intrinsic_vmv.s.x_x_nxv8i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -149,7 +149,7 @@ 
define @intrinsic_vmv.s.x_x_nxv16i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -162,7 +162,7 @@ define @intrinsic_vmv.s.x_x_nxv32i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -175,7 +175,7 @@ define @intrinsic_vmv.s.x_x_nxv1i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -188,7 +188,7 @@ define @intrinsic_vmv.s.x_x_nxv2i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -201,7 +201,7 @@ define @intrinsic_vmv.s.x_x_nxv4i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -214,7 +214,7 @@ define @intrinsic_vmv.s.x_x_nxv8i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -227,7 +227,7 @@ define @intrinsic_vmv.s.x_x_nxv16i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -240,7 +240,7 @@ define @intrinsic_vmv.s.x_x_nxv1i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -253,7 +253,7 @@ define @intrinsic_vmv.s.x_x_nxv2i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -266,7 +266,7 @@ define @intrinsic_vmv.s.x_x_nxv4i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -279,7 +279,7 @@ define @intrinsic_vmv.s.x_x_nxv8i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i64_bug: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: ld a0, 0(a0) -; CHECK-NEXT: vsetivli 
zero, 1, e64, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-rv32.ll @@ -9,7 +9,7 @@ define @intrinsic_vmv.v.v_v_nxv1i8_nxv1i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -29,7 +29,7 @@ define @intrinsic_vmv.v.v_v_nxv2i8_nxv2i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -49,7 +49,7 @@ define @intrinsic_vmv.v.v_v_nxv4i8_nxv4i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -69,7 +69,7 @@ define @intrinsic_vmv.v.v_v_nxv8i8_nxv8i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -89,7 +89,7 @@ define @intrinsic_vmv.v.v_v_nxv16i8_nxv16i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -109,7 +109,7 @@ define @intrinsic_vmv.v.v_v_nxv32i8_nxv32i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -129,7 +129,7 @@ define @intrinsic_vmv.v.v_v_nxv64i8_nxv64i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -149,7 +149,7 @@ define @intrinsic_vmv.v.v_v_nxv1i16_nxv1i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -169,7 +169,7 @@ define @intrinsic_vmv.v.v_v_nxv2i16_nxv2i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -189,7 +189,7 @@ define @intrinsic_vmv.v.v_v_nxv4i16_nxv4i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -209,7 +209,7 @@ define @intrinsic_vmv.v.v_v_nxv8i16_nxv8i16( 
%0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -229,7 +229,7 @@ define @intrinsic_vmv.v.v_v_nxv16i16_nxv16i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -249,7 +249,7 @@ define @intrinsic_vmv.v.v_v_nxv32i16_nxv32i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ define @intrinsic_vmv.v.v_v_nxv1i32_nxv1i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -289,7 +289,7 @@ define @intrinsic_vmv.v.v_v_nxv2i32_nxv2i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -309,7 +309,7 @@ define @intrinsic_vmv.v.v_v_nxv4i32_nxv4i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -329,7 +329,7 @@ define @intrinsic_vmv.v.v_v_nxv8i32_nxv8i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -349,7 +349,7 @@ define @intrinsic_vmv.v.v_v_nxv16i32_nxv16i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -369,7 +369,7 @@ define @intrinsic_vmv.v.v_v_nxv1i64_nxv1i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -389,7 +389,7 @@ define @intrinsic_vmv.v.v_v_nxv2i64_nxv2i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -409,7 +409,7 @@ define @intrinsic_vmv.v.v_v_nxv4i64_nxv4i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -429,7 +429,7 @@ define @intrinsic_vmv.v.v_v_nxv8i64_nxv8i64( %0, i32 %1) nounwind { ; 
CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -449,7 +449,7 @@ define @intrinsic_vmv.v.v_v_nxv1f16_nxv1f16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -469,7 +469,7 @@ define @intrinsic_vmv.v.v_v_nxv2f16_nxv2f16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -489,7 +489,7 @@ define @intrinsic_vmv.v.v_v_nxv4f16_nxv4f16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -509,7 +509,7 @@ define @intrinsic_vmv.v.v_v_nxv8f16_nxv8f16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -529,7 +529,7 @@ define @intrinsic_vmv.v.v_v_nxv16f16_nxv16f16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -549,7 +549,7 @@ define @intrinsic_vmv.v.v_v_nxv32f16_nxv32f16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -569,7 +569,7 @@ define @intrinsic_vmv.v.v_v_nxv1f32_nxv1f32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -589,7 +589,7 @@ define @intrinsic_vmv.v.v_v_nxv2f32_nxv2f32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -609,7 +609,7 @@ define @intrinsic_vmv.v.v_v_nxv4f32_nxv4f32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -629,7 +629,7 @@ define @intrinsic_vmv.v.v_v_nxv8f32_nxv8f32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -649,7 +649,7 @@ define @intrinsic_vmv.v.v_v_nxv16f32_nxv16f32( %0, i32 %1) nounwind { ; CHECK-LABEL: 
intrinsic_vmv.v.v_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -669,7 +669,7 @@ define @intrinsic_vmv.v.v_v_nxv1f64_nxv1f64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -689,7 +689,7 @@ define @intrinsic_vmv.v.v_v_nxv2f64_nxv2f64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -709,7 +709,7 @@ define @intrinsic_vmv.v.v_v_nxv4f64_nxv4f64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -729,7 +729,7 @@ define @intrinsic_vmv.v.v_v_nxv8f64_nxv8f64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-rv64.ll @@ -9,7 +9,7 @@ define @intrinsic_vmv.v.v_v_nxv1i8_nxv1i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -29,7 +29,7 @@ define @intrinsic_vmv.v.v_v_nxv2i8_nxv2i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -49,7 +49,7 @@ define @intrinsic_vmv.v.v_v_nxv4i8_nxv4i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -69,7 +69,7 @@ define @intrinsic_vmv.v.v_v_nxv8i8_nxv8i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -89,7 +89,7 @@ define @intrinsic_vmv.v.v_v_nxv16i8_nxv16i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -109,7 +109,7 @@ define @intrinsic_vmv.v.v_v_nxv32i8_nxv32i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; 
CHECK-NEXT: ret entry: @@ -129,7 +129,7 @@ define @intrinsic_vmv.v.v_v_nxv64i8_nxv64i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -149,7 +149,7 @@ define @intrinsic_vmv.v.v_v_nxv1i16_nxv1i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -169,7 +169,7 @@ define @intrinsic_vmv.v.v_v_nxv2i16_nxv2i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -189,7 +189,7 @@ define @intrinsic_vmv.v.v_v_nxv4i16_nxv4i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -209,7 +209,7 @@ define @intrinsic_vmv.v.v_v_nxv8i16_nxv8i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -229,7 +229,7 @@ define @intrinsic_vmv.v.v_v_nxv16i16_nxv16i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -249,7 +249,7 @@ define @intrinsic_vmv.v.v_v_nxv32i16_nxv32i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ define @intrinsic_vmv.v.v_v_nxv1i32_nxv1i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -289,7 +289,7 @@ define @intrinsic_vmv.v.v_v_nxv2i32_nxv2i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -309,7 +309,7 @@ define @intrinsic_vmv.v.v_v_nxv4i32_nxv4i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -329,7 +329,7 @@ define @intrinsic_vmv.v.v_v_nxv8i32_nxv8i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -349,7 
+349,7 @@ define @intrinsic_vmv.v.v_v_nxv16i32_nxv16i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -369,7 +369,7 @@ define @intrinsic_vmv.v.v_v_nxv1i64_nxv1i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -389,7 +389,7 @@ define @intrinsic_vmv.v.v_v_nxv2i64_nxv2i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -409,7 +409,7 @@ define @intrinsic_vmv.v.v_v_nxv4i64_nxv4i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -429,7 +429,7 @@ define @intrinsic_vmv.v.v_v_nxv8i64_nxv8i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -449,7 +449,7 @@ define @intrinsic_vmv.v.v_v_nxv1f16_nxv1f16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -469,7 +469,7 @@ define @intrinsic_vmv.v.v_v_nxv2f16_nxv2f16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -489,7 +489,7 @@ define @intrinsic_vmv.v.v_v_nxv4f16_nxv4f16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -509,7 +509,7 @@ define @intrinsic_vmv.v.v_v_nxv8f16_nxv8f16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -529,7 +529,7 @@ define @intrinsic_vmv.v.v_v_nxv16f16_nxv16f16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -549,7 +549,7 @@ define @intrinsic_vmv.v.v_v_nxv32f16_nxv32f16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -569,7 +569,7 @@ define 
@intrinsic_vmv.v.v_v_nxv1f32_nxv1f32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -589,7 +589,7 @@ define @intrinsic_vmv.v.v_v_nxv2f32_nxv2f32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -609,7 +609,7 @@ define @intrinsic_vmv.v.v_v_nxv4f32_nxv4f32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -629,7 +629,7 @@ define @intrinsic_vmv.v.v_v_nxv8f32_nxv8f32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -649,7 +649,7 @@ define @intrinsic_vmv.v.v_v_nxv16f32_nxv16f32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -669,7 +669,7 @@ define @intrinsic_vmv.v.v_v_nxv1f64_nxv1f64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -689,7 +689,7 @@ define @intrinsic_vmv.v.v_v_nxv2f64_nxv2f64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -709,7 +709,7 @@ define @intrinsic_vmv.v.v_v_nxv4f64_nxv4f64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: @@ -729,7 +729,7 @@ define @intrinsic_vmv.v.v_v_nxv8f64_nxv8f64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.v_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmv.v.v v8, v8 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll @@ -9,7 +9,7 @@ define @intrinsic_vmv.v.x_x_nxv1i8(i8 %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -29,7 +29,7 @@ define @intrinsic_vmv.v.x_x_nxv2i8(i8 %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; 
CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -49,7 +49,7 @@ define @intrinsic_vmv.v.x_x_nxv4i8(i8 %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -69,7 +69,7 @@ define @intrinsic_vmv.v.x_x_nxv8i8(i8 %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -89,7 +89,7 @@ define @intrinsic_vmv.v.x_x_nxv16i8(i8 %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -109,7 +109,7 @@ define @intrinsic_vmv.v.x_x_nxv32i8(i8 %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -129,7 +129,7 @@ define @intrinsic_vmv.v.x_x_nxv64i8(i8 %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -149,7 +149,7 @@ define @intrinsic_vmv.v.x_x_nxv1i16(i16 %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -169,7 +169,7 @@ define @intrinsic_vmv.v.x_x_nxv2i16(i16 %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -189,7 +189,7 @@ define @intrinsic_vmv.v.x_x_nxv4i16(i16 %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -209,7 +209,7 @@ define @intrinsic_vmv.v.x_x_nxv8i16(i16 %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -229,7 +229,7 @@ define @intrinsic_vmv.v.x_x_nxv16i16(i16 %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -249,7 +249,7 @@ define @intrinsic_vmv.v.x_x_nxv32i16(i16 %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ define @intrinsic_vmv.v.x_x_nxv1i32(i32 %0, i32 %1) nounwind { ; CHECK-LABEL: 
intrinsic_vmv.v.x_x_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -289,7 +289,7 @@ define @intrinsic_vmv.v.x_x_nxv2i32(i32 %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -309,7 +309,7 @@ define @intrinsic_vmv.v.x_x_nxv4i32(i32 %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -329,7 +329,7 @@ define @intrinsic_vmv.v.x_x_nxv8i32(i32 %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -349,7 +349,7 @@ define @intrinsic_vmv.v.x_x_nxv16i32(i32 %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -373,7 +373,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v8, (a0), zero ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -398,7 +398,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v8, (a0), zero ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -423,7 +423,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v8, (a0), zero ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -448,7 +448,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vlse64.v v8, (a0), zero ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -464,7 +464,7 @@ define @intrinsic_vmv.v.x_i_nxv1i8(i32 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -479,7 +479,7 @@ define @intrinsic_vmv.v.x_i_nxv2i8(i32 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -494,7 +494,7 @@ define @intrinsic_vmv.v.x_i_nxv4i8(i32 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -509,7 +509,7 @@ define @intrinsic_vmv.v.x_i_nxv8i8(i32 %0) nounwind { ; 
CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -524,7 +524,7 @@ define @intrinsic_vmv.v.x_i_nxv16i8(i32 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -539,7 +539,7 @@ define @intrinsic_vmv.v.x_i_nxv32i8(i32 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -554,7 +554,7 @@ define @intrinsic_vmv.v.x_i_nxv64i8(i32 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -569,7 +569,7 @@ define @intrinsic_vmv.v.x_i_nxv1i16(i32 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -584,7 +584,7 @@ define @intrinsic_vmv.v.x_i_nxv2i16(i32 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -599,7 +599,7 @@ define @intrinsic_vmv.v.x_i_nxv4i16(i32 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -614,7 +614,7 @@ define @intrinsic_vmv.v.x_i_nxv8i16(i32 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -629,7 +629,7 @@ define @intrinsic_vmv.v.x_i_nxv16i16(i32 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -644,7 +644,7 @@ define @intrinsic_vmv.v.x_i_nxv32i16(i32 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -659,7 +659,7 @@ define @intrinsic_vmv.v.x_i_nxv1i32(i32 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -674,7 +674,7 @@ define @intrinsic_vmv.v.x_i_nxv2i32(i32 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -689,7 +689,7 @@ define @intrinsic_vmv.v.x_i_nxv4i32(i32 %0) 
nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -704,7 +704,7 @@ define @intrinsic_vmv.v.x_i_nxv8i32(i32 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -719,7 +719,7 @@ define @intrinsic_vmv.v.x_i_nxv16i32(i32 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -734,7 +734,7 @@ define @intrinsic_vmv.v.x_i_nxv1i64(i32 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -749,7 +749,7 @@ define @intrinsic_vmv.v.x_i_nxv2i64(i32 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vmv.v.x_i_nxv4i64(i32 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -779,7 +779,7 @@ define @intrinsic_vmv.v.x_i_nxv8i64(i32 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -794,7 +794,7 @@ define @intrinsic_vmv.v.x_i_nxv1i64_vlmax() nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i64_vlmax: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 3 ; CHECK-NEXT: ret entry: @@ -809,7 +809,7 @@ define @intrinsic_vmv.v.x_i_nxv2i64_vlmax() nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i64_vlmax: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 3 ; CHECK-NEXT: ret entry: @@ -824,7 +824,7 @@ define @intrinsic_vmv.v.x_i_nxv4i64_vlmax() nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i64_vlmax: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 3 ; CHECK-NEXT: ret entry: @@ -839,7 +839,7 @@ define @intrinsic_vmv.v.x_i_nxv8i64_vlmax() nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i64_vlmax: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 3 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv64.ll @@ -9,7 +9,7 @@ define @intrinsic_vmv.v.x_x_nxv1i8(i8 %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i8: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -29,7 +29,7 @@ define @intrinsic_vmv.v.x_x_nxv2i8(i8 %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -49,7 +49,7 @@ define @intrinsic_vmv.v.x_x_nxv4i8(i8 %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -69,7 +69,7 @@ define @intrinsic_vmv.v.x_x_nxv8i8(i8 %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -89,7 +89,7 @@ define @intrinsic_vmv.v.x_x_nxv16i8(i8 %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -109,7 +109,7 @@ define @intrinsic_vmv.v.x_x_nxv32i8(i8 %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -129,7 +129,7 @@ define @intrinsic_vmv.v.x_x_nxv64i8(i8 %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -149,7 +149,7 @@ define @intrinsic_vmv.v.x_x_nxv1i16(i16 %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -169,7 +169,7 @@ define @intrinsic_vmv.v.x_x_nxv2i16(i16 %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -189,7 +189,7 @@ define @intrinsic_vmv.v.x_x_nxv4i16(i16 %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -209,7 +209,7 @@ define @intrinsic_vmv.v.x_x_nxv8i16(i16 %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -229,7 +229,7 @@ define @intrinsic_vmv.v.x_x_nxv16i16(i16 %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -249,7 +249,7 @@ define 
@intrinsic_vmv.v.x_x_nxv32i16(i16 %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ define @intrinsic_vmv.v.x_x_nxv1i32(i32 %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -289,7 +289,7 @@ define @intrinsic_vmv.v.x_x_nxv2i32(i32 %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -309,7 +309,7 @@ define @intrinsic_vmv.v.x_x_nxv4i32(i32 %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -329,7 +329,7 @@ define @intrinsic_vmv.v.x_x_nxv8i32(i32 %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -349,7 +349,7 @@ define @intrinsic_vmv.v.x_x_nxv16i32(i32 %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -369,7 +369,7 @@ define @intrinsic_vmv.v.x_x_nxv1i64(i64 %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -389,7 +389,7 @@ define @intrinsic_vmv.v.x_x_nxv2i64(i64 %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -409,7 +409,7 @@ define @intrinsic_vmv.v.x_x_nxv4i64(i64 %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -429,7 +429,7 @@ define @intrinsic_vmv.v.x_x_nxv8i64(i64 %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret entry: @@ -444,7 +444,7 @@ define @intrinsic_vmv.v.x_i_nxv1i8(i64 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -459,7 +459,7 @@ define @intrinsic_vmv.v.x_i_nxv2i8(i64 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli 
zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -474,7 +474,7 @@ define @intrinsic_vmv.v.x_i_nxv4i8(i64 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -489,7 +489,7 @@ define @intrinsic_vmv.v.x_i_nxv8i8(i64 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -504,7 +504,7 @@ define @intrinsic_vmv.v.x_i_nxv16i8(i64 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -519,7 +519,7 @@ define @intrinsic_vmv.v.x_i_nxv32i8(i64 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -534,7 +534,7 @@ define @intrinsic_vmv.v.x_i_nxv64i8(i64 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -549,7 +549,7 @@ define @intrinsic_vmv.v.x_i_nxv1i16(i64 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -564,7 +564,7 @@ define @intrinsic_vmv.v.x_i_nxv2i16(i64 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -579,7 +579,7 @@ define @intrinsic_vmv.v.x_i_nxv4i16(i64 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -594,7 +594,7 @@ define @intrinsic_vmv.v.x_i_nxv8i16(i64 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -609,7 +609,7 @@ define @intrinsic_vmv.v.x_i_nxv16i16(i64 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -624,7 +624,7 @@ define @intrinsic_vmv.v.x_i_nxv32i16(i64 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -639,7 +639,7 @@ define @intrinsic_vmv.v.x_i_nxv1i32(i64 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: 
vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -654,7 +654,7 @@ define @intrinsic_vmv.v.x_i_nxv2i32(i64 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -669,7 +669,7 @@ define @intrinsic_vmv.v.x_i_nxv4i32(i64 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -684,7 +684,7 @@ define @intrinsic_vmv.v.x_i_nxv8i32(i64 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -699,7 +699,7 @@ define @intrinsic_vmv.v.x_i_nxv16i32(i64 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -714,7 +714,7 @@ define @intrinsic_vmv.v.x_i_nxv1i64(i64 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -729,7 +729,7 @@ define @intrinsic_vmv.v.x_i_nxv2i64(i64 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -744,7 +744,7 @@ define @intrinsic_vmv.v.x_i_nxv4i64(i64 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: @@ -759,7 +759,7 @@ define @intrinsic_vmv.v.x_i_nxv8i64(i64 %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv32.ll @@ -6,7 +6,7 @@ define signext i8 @intrinsic_vmv.x.s_s_nxv1i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, mf8, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -19,7 +19,7 @@ define signext i8 @intrinsic_vmv.x.s_s_nxv2i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -32,7 +32,7 @@ define signext i8 @intrinsic_vmv.x.s_s_nxv4i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, mf2, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; 
CHECK-NEXT: ret entry: @@ -45,7 +45,7 @@ define signext i8 @intrinsic_vmv.x.s_s_nxv8i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define signext i8 @intrinsic_vmv.x.s_s_nxv16i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e8, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, m2, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -71,7 +71,7 @@ define signext i8 @intrinsic_vmv.x.s_s_nxv32i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e8, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, m4, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -84,7 +84,7 @@ define signext i8 @intrinsic_vmv.x.s_s_nxv64i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -97,7 +97,7 @@ define signext i16 @intrinsic_vmv.x.s_s_nxv1i16( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -110,7 +110,7 @@ define signext i16 @intrinsic_vmv.x.s_s_nxv2i16( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -123,7 +123,7 @@ define signext i16 @intrinsic_vmv.x.s_s_nxv4i16( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -136,7 +136,7 @@ define signext i16 @intrinsic_vmv.x.s_s_nxv8i16( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m2, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -149,7 +149,7 @@ define signext i16 @intrinsic_vmv.x.s_s_nxv16i16( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -162,7 +162,7 @@ define signext i16 @intrinsic_vmv.x.s_s_nxv32i16( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e16, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m8, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -175,7 +175,7 @@ define i32 @intrinsic_vmv.x.s_s_nxv1i32( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -188,7 +188,7 @@ define i32 @intrinsic_vmv.x.s_s_nxv2i32( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 
0, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -201,7 +201,7 @@ define i32 @intrinsic_vmv.x.s_s_nxv4i32( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -214,7 +214,7 @@ define i32 @intrinsic_vmv.x.s_s_nxv8i32( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m4, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -227,7 +227,7 @@ define i32 @intrinsic_vmv.x.s_s_nxv16i32( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -241,7 +241,7 @@ ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vsrl.vx v9, v8, a0 ; CHECK-NEXT: vmv.x.s a1, v9 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -257,7 +257,7 @@ ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, ma ; CHECK-NEXT: vsrl.vx v10, v8, a0 ; CHECK-NEXT: vmv.x.s a1, v10 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -273,7 +273,7 @@ ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m4, ta, ma ; CHECK-NEXT: vsrl.vx v12, v8, a0 ; CHECK-NEXT: vmv.x.s a1, v12 ; CHECK-NEXT: vmv.x.s a0, v8 @@ -289,7 +289,7 @@ ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, ma ; CHECK-NEXT: vsrl.vx v16, v8, a0 ; CHECK-NEXT: vmv.x.s a1, v16 ; CHECK-NEXT: vmv.x.s a0, v8 diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv64.ll @@ -6,7 +6,7 @@ define signext i8 @intrinsic_vmv.x.s_s_nxv1i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, mf8, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -19,7 +19,7 @@ define signext i8 @intrinsic_vmv.x.s_s_nxv2i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -32,7 +32,7 @@ define signext i8 @intrinsic_vmv.x.s_s_nxv4i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, mf2, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -45,7 +45,7 @@ define signext i8 @intrinsic_vmv.x.s_s_nxv8i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e8, m1, ta, mu +; 
CHECK-NEXT: vsetivli zero, 0, e8, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define signext i8 @intrinsic_vmv.x.s_s_nxv16i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e8, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, m2, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -71,7 +71,7 @@ define signext i8 @intrinsic_vmv.x.s_s_nxv32i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e8, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, m4, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -84,7 +84,7 @@ define signext i8 @intrinsic_vmv.x.s_s_nxv64i8( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -97,7 +97,7 @@ define signext i16 @intrinsic_vmv.x.s_s_nxv1i16( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -110,7 +110,7 @@ define signext i16 @intrinsic_vmv.x.s_s_nxv2i16( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf2, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -123,7 +123,7 @@ define signext i16 @intrinsic_vmv.x.s_s_nxv4i16( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -136,7 +136,7 @@ define signext i16 @intrinsic_vmv.x.s_s_nxv8i16( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m2, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -149,7 +149,7 @@ define signext i16 @intrinsic_vmv.x.s_s_nxv16i16( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -162,7 +162,7 @@ define signext i16 @intrinsic_vmv.x.s_s_nxv32i16( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e16, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m8, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -175,7 +175,7 @@ define signext i32 @intrinsic_vmv.x.s_s_nxv1i32( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -188,7 +188,7 @@ define signext i32 @intrinsic_vmv.x.s_s_nxv2i32( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -201,7 +201,7 @@ define signext i32 @intrinsic_vmv.x.s_s_nxv4i32( %0) nounwind { ; 
CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m2, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -214,7 +214,7 @@ define signext i32 @intrinsic_vmv.x.s_s_nxv8i32( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m4, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -227,7 +227,7 @@ define signext i32 @intrinsic_vmv.x.s_s_nxv16i32( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -240,7 +240,7 @@ define i64 @intrinsic_vmv.x.s_s_nxv1i64( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -253,7 +253,7 @@ define i64 @intrinsic_vmv.x.s_s_nxv2i64( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e64, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m2, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -266,7 +266,7 @@ define i64 @intrinsic_vmv.x.s_s_nxv4i64( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e64, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m4, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: @@ -279,7 +279,7 @@ define i64 @intrinsic_vmv.x.s_s_nxv8i64( %0) nounwind { ; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e64, m8, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m8, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmxnor.ll b/llvm/test/CodeGen/RISCV/rvv/vmxnor.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmxnor.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmxnor.ll @@ -11,7 +11,7 @@ define @intrinsic_vmxnor_mm_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxnor_mm_nxv1i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -31,7 +31,7 @@ define @intrinsic_vmxnor_mm_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxnor_mm_nxv2i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -51,7 +51,7 @@ define @intrinsic_vmxnor_mm_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxnor_mm_nxv4i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -71,7 +71,7 @@ define @intrinsic_vmxnor_mm_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxnor_mm_nxv8i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -91,7 +91,7 @@ define @intrinsic_vmxnor_mm_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vmxnor_mm_nxv16i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -111,7 +111,7 @@ define @intrinsic_vmxnor_mm_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxnor_mm_nxv32i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -131,7 +131,7 @@ define @intrinsic_vmxnor_mm_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxnor_mm_nxv64i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmxnor.mm v0, v0, v8 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmxor.ll b/llvm/test/CodeGen/RISCV/rvv/vmxor.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmxor.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmxor.ll @@ -11,7 +11,7 @@ define @intrinsic_vmxor_mm_nxv1i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxor_mm_nxv1i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -31,7 +31,7 @@ define @intrinsic_vmxor_mm_nxv2i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxor_mm_nxv2i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -51,7 +51,7 @@ define @intrinsic_vmxor_mm_nxv4i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxor_mm_nxv4i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -71,7 +71,7 @@ define @intrinsic_vmxor_mm_nxv8i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxor_mm_nxv8i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -91,7 +91,7 @@ define @intrinsic_vmxor_mm_nxv16i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxor_mm_nxv16i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -111,7 +111,7 @@ define @intrinsic_vmxor_mm_nxv32i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxor_mm_nxv32i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret entry: @@ -131,7 +131,7 @@ define @intrinsic_vmxor_mm_nxv64i1( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vmxor_mm_nxv64i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmxor.mm v0, v0, v8 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclip.ll b/llvm/test/CodeGen/RISCV/rvv/vnclip.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnclip.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnclip.ll @@ -12,7 +12,7 @@ define @intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vnclip.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -59,7 +59,7 @@ define @intrinsic_vnclip_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv2i8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vnclip.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -106,7 +106,7 @@ define @intrinsic_vnclip_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv4i8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vnclip.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -153,7 +153,7 @@ define @intrinsic_vnclip_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv8i8_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vnclip.wv v11, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret @@ -201,7 +201,7 @@ define @intrinsic_vnclip_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv16i8_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vnclip.wv v14, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret @@ -249,7 +249,7 @@ define @intrinsic_vnclip_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv32i8_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vnclip.wv v20, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret @@ -297,7 +297,7 @@ define @intrinsic_vnclip_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i16_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vnclip.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -344,7 +344,7 @@ define @intrinsic_vnclip_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv2i16_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vnclip.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -391,7 +391,7 @@ define @intrinsic_vnclip_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv4i16_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vnclip.wv v11, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret @@ -439,7 +439,7 @@ define @intrinsic_vnclip_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv8i16_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vnclip.wv v14, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret @@ -487,7 +487,7 @@ define @intrinsic_vnclip_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv16i16_nxv16i32_nxv16i16: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vnclip.wv v20, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret @@ -535,7 +535,7 @@ define @intrinsic_vnclip_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i32_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vnclip.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -582,7 +582,7 @@ define @intrinsic_vnclip_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv2i32_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vnclip.wv v11, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret @@ -630,7 +630,7 @@ define @intrinsic_vnclip_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv4i32_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vnclip.wv v14, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret @@ -678,7 +678,7 @@ define @intrinsic_vnclip_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_wv_nxv8i32_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vnclip.wv v20, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret @@ -726,7 +726,7 @@ define @intrinsic_vnclip_vx_nxv1i8_nxv1i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vnclip.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -773,7 +773,7 @@ define @intrinsic_vnclip_vx_nxv2i8_nxv2i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vnclip.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -820,7 +820,7 @@ define @intrinsic_vnclip_vx_nxv4i8_nxv4i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vnclip.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -867,7 +867,7 @@ define @intrinsic_vnclip_vx_nxv8i8_nxv8i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vnclip.wx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -915,7 +915,7 @@ define @intrinsic_vnclip_vx_nxv16i8_nxv16i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vnclip.wx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -963,7 +963,7 @@ define @intrinsic_vnclip_vx_nxv32i8_nxv32i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv32i8_nxv32i16: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vnclip.wx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1011,7 +1011,7 @@ define @intrinsic_vnclip_vx_nxv1i16_nxv1i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vnclip.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1058,7 +1058,7 @@ define @intrinsic_vnclip_vx_nxv2i16_nxv2i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vnclip.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1105,7 +1105,7 @@ define @intrinsic_vnclip_vx_nxv4i16_nxv4i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vnclip.wx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1153,7 +1153,7 @@ define @intrinsic_vnclip_vx_nxv8i16_nxv8i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vnclip.wx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1201,7 +1201,7 @@ define @intrinsic_vnclip_vx_nxv16i16_nxv16i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vnclip.wx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1249,7 +1249,7 @@ define @intrinsic_vnclip_vx_nxv1i32_nxv1i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vnclip.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1296,7 +1296,7 @@ define @intrinsic_vnclip_vx_nxv2i32_nxv2i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vnclip.wx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1344,7 +1344,7 @@ define @intrinsic_vnclip_vx_nxv4i32_nxv4i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vnclip.wx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1392,7 +1392,7 @@ define @intrinsic_vnclip_vx_nxv8i32_nxv8i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vnclip.wx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1434,7 +1434,7 @@ define @intrinsic_vnclip_vi_nxv1i8_nxv1i16_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: 
intrinsic_vnclip_vi_nxv1i8_nxv1i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vnclip.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1467,7 +1467,7 @@ define @intrinsic_vnclip_vi_nxv2i8_nxv2i16_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vi_nxv2i8_nxv2i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vnclip.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1500,7 +1500,7 @@ define @intrinsic_vnclip_vi_nxv4i8_nxv4i16_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vi_nxv4i8_nxv4i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vnclip.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1533,7 +1533,7 @@ define @intrinsic_vnclip_vi_nxv8i8_nxv8i16_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vi_nxv8i8_nxv8i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vnclip.wi v10, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1567,7 +1567,7 @@ define @intrinsic_vnclip_vi_nxv16i8_nxv16i16_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vi_nxv16i8_nxv16i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vnclip.wi v12, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1601,7 +1601,7 @@ define @intrinsic_vnclip_vi_nxv32i8_nxv32i16_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vi_nxv32i8_nxv32i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vnclip.wi v16, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1635,7 +1635,7 @@ define @intrinsic_vnclip_vi_nxv1i16_nxv1i32_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vi_nxv1i16_nxv1i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vnclip.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1668,7 +1668,7 @@ define @intrinsic_vnclip_vi_nxv2i16_nxv2i32_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vi_nxv2i16_nxv2i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vnclip.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1701,7 +1701,7 @@ define @intrinsic_vnclip_vi_nxv4i16_nxv4i32_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vi_nxv4i16_nxv4i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vnclip.wi v10, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1735,7 +1735,7 @@ define @intrinsic_vnclip_vi_nxv8i16_nxv8i32_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vi_nxv8i16_nxv8i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vnclip.wi v12, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1769,7 +1769,7 @@ define @intrinsic_vnclip_vi_nxv16i16_nxv16i32_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vi_nxv16i16_nxv16i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vnclip.wi v16, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1803,7 +1803,7 @@ define @intrinsic_vnclip_vi_nxv1i32_nxv1i64_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vi_nxv1i32_nxv1i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vnclip.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1836,7 +1836,7 @@ define @intrinsic_vnclip_vi_nxv2i32_nxv2i64_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vi_nxv2i32_nxv2i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vnclip.wi v10, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1870,7 +1870,7 @@ define @intrinsic_vnclip_vi_nxv4i32_nxv4i64_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vi_nxv4i32_nxv4i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vnclip.wi v12, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1904,7 +1904,7 @@ define @intrinsic_vnclip_vi_nxv8i32_nxv8i64_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vnclip_vi_nxv8i32_nxv8i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vnclip.wi v16, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclipu.ll b/llvm/test/CodeGen/RISCV/rvv/vnclipu.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnclipu.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnclipu.ll @@ -12,7 +12,7 @@ define @intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vnclipu.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -59,7 +59,7 @@ define @intrinsic_vnclipu_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv2i8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vnclipu.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -106,7 +106,7 @@ define @intrinsic_vnclipu_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv4i8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vnclipu.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -153,7 +153,7 @@ define @intrinsic_vnclipu_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv8i8_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vnclipu.wv v11, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret @@ -201,7 +201,7 @@ define @intrinsic_vnclipu_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv16i8_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vnclipu.wv v14, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret @@ -249,7 
+249,7 @@ define @intrinsic_vnclipu_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv32i8_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vnclipu.wv v20, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret @@ -297,7 +297,7 @@ define @intrinsic_vnclipu_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i16_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vnclipu.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -344,7 +344,7 @@ define @intrinsic_vnclipu_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv2i16_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vnclipu.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -391,7 +391,7 @@ define @intrinsic_vnclipu_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv4i16_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vnclipu.wv v11, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret @@ -439,7 +439,7 @@ define @intrinsic_vnclipu_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv8i16_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vnclipu.wv v14, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret @@ -487,7 +487,7 @@ define @intrinsic_vnclipu_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv16i16_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vnclipu.wv v20, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret @@ -535,7 +535,7 @@ define @intrinsic_vnclipu_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i32_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vnclipu.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -582,7 +582,7 @@ define @intrinsic_vnclipu_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv2i32_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vnclipu.wv v11, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret @@ -630,7 +630,7 @@ define @intrinsic_vnclipu_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv4i32_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vnclipu.wv v14, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret @@ -678,7 +678,7 @@ define @intrinsic_vnclipu_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv8i32_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; 
CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vnclipu.wv v20, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret @@ -726,7 +726,7 @@ define @intrinsic_vnclipu_vx_nxv1i8_nxv1i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vnclipu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -773,7 +773,7 @@ define @intrinsic_vnclipu_vx_nxv2i8_nxv2i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vnclipu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -820,7 +820,7 @@ define @intrinsic_vnclipu_vx_nxv4i8_nxv4i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vnclipu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -867,7 +867,7 @@ define @intrinsic_vnclipu_vx_nxv8i8_nxv8i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vnclipu.wx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -915,7 +915,7 @@ define @intrinsic_vnclipu_vx_nxv16i8_nxv16i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vnclipu.wx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -963,7 +963,7 @@ define @intrinsic_vnclipu_vx_nxv32i8_nxv32i16( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vnclipu.wx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1011,7 +1011,7 @@ define @intrinsic_vnclipu_vx_nxv1i16_nxv1i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vnclipu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1058,7 +1058,7 @@ define @intrinsic_vnclipu_vx_nxv2i16_nxv2i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vnclipu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1105,7 +1105,7 @@ define @intrinsic_vnclipu_vx_nxv4i16_nxv4i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vnclipu.wx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1153,7 +1153,7 @@ define @intrinsic_vnclipu_vx_nxv8i16_nxv8i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, 
e16, m2, ta, ma ; CHECK-NEXT: vnclipu.wx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1201,7 +1201,7 @@ define @intrinsic_vnclipu_vx_nxv16i16_nxv16i32( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vnclipu.wx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1249,7 +1249,7 @@ define @intrinsic_vnclipu_vx_nxv1i32_nxv1i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vnclipu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1296,7 +1296,7 @@ define @intrinsic_vnclipu_vx_nxv2i32_nxv2i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vnclipu.wx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1344,7 +1344,7 @@ define @intrinsic_vnclipu_vx_nxv4i32_nxv4i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vnclipu.wx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1392,7 +1392,7 @@ define @intrinsic_vnclipu_vx_nxv8i32_nxv8i64( %0, iXLen %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vnclipu.wx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1434,7 +1434,7 @@ define @intrinsic_vnclipu_vi_nxv1i8_nxv1i16_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv1i8_nxv1i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vnclipu.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1467,7 +1467,7 @@ define @intrinsic_vnclipu_vi_nxv2i8_nxv2i16_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv2i8_nxv2i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vnclipu.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1500,7 +1500,7 @@ define @intrinsic_vnclipu_vi_nxv4i8_nxv4i16_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv4i8_nxv4i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vnclipu.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1533,7 +1533,7 @@ define @intrinsic_vnclipu_vi_nxv8i8_nxv8i16_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv8i8_nxv8i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vnclipu.wi v10, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1567,7 +1567,7 @@ define @intrinsic_vnclipu_vi_nxv16i8_nxv16i16_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv16i8_nxv16i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; 
CHECK-NEXT: vnclipu.wi v12, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1601,7 +1601,7 @@ define @intrinsic_vnclipu_vi_nxv32i8_nxv32i16_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv32i8_nxv32i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vnclipu.wi v16, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1635,7 +1635,7 @@ define @intrinsic_vnclipu_vi_nxv1i16_nxv1i32_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv1i16_nxv1i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vnclipu.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1668,7 +1668,7 @@ define @intrinsic_vnclipu_vi_nxv2i16_nxv2i32_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv2i16_nxv2i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vnclipu.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1701,7 +1701,7 @@ define @intrinsic_vnclipu_vi_nxv4i16_nxv4i32_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv4i16_nxv4i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vnclipu.wi v10, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1735,7 +1735,7 @@ define @intrinsic_vnclipu_vi_nxv8i16_nxv8i32_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv8i16_nxv8i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vnclipu.wi v12, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1769,7 +1769,7 @@ define @intrinsic_vnclipu_vi_nxv16i16_nxv16i32_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv16i16_nxv16i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vnclipu.wi v16, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1803,7 +1803,7 @@ define @intrinsic_vnclipu_vi_nxv1i32_nxv1i64_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv1i32_nxv1i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vnclipu.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1836,7 +1836,7 @@ define @intrinsic_vnclipu_vi_nxv2i32_nxv2i64_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv2i32_nxv2i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vnclipu.wi v10, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1870,7 +1870,7 @@ define @intrinsic_vnclipu_vi_nxv4i32_nxv4i64_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv4i32_nxv4i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vnclipu.wi v12, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1904,7 +1904,7 @@ define @intrinsic_vnclipu_vi_nxv8i32_nxv8i64_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv8i32_nxv8i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, 
m4, ta, ma ; CHECK-NEXT: vnclipu.wi v16, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll @@ -11,7 +11,7 @@ define @intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define @intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -105,7 +105,7 @@ define @intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -152,7 +152,7 @@ define @intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -199,7 +199,7 @@ define @intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -246,7 +246,7 @@ define @intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -293,7 +293,7 @@ define @intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -622,7 +622,7 @@ define @intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -669,7 +669,7 @@ define @intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -716,7 +716,7 @@ define @intrinsic_vnmsac_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -763,7 +763,7 @@ define @intrinsic_vnmsac_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -810,7 +810,7 @@ define @intrinsic_vnmsac_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -857,7 +857,7 @@ define @intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -904,7 +904,7 @@ define @intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8: ; CHECK: # %bb.0: 
# %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -951,7 +951,7 @@ define @intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -998,7 +998,7 @@ define @intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1045,7 +1045,7 @@ define @intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -1092,7 +1092,7 @@ define @intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -1139,7 +1139,7 @@ define @intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1186,7 +1186,7 @@ define @intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1233,7 +1233,7 @@ define @intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1280,7 +1280,7 @@ define @intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -1327,7 +1327,7 @@ define @intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -1374,7 +1374,7 @@ define @intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, 
tu, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1421,7 +1421,7 @@ define @intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1468,7 +1468,7 @@ define @intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -1515,7 +1515,7 @@ define @intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -1566,9 +1566,9 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v10, v9 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1596,7 +1596,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vnmsac.vv v8, v10, v9, v0.t @@ -1627,9 +1627,9 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v12, v10 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1657,7 +1657,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu ; CHECK-NEXT: vnmsac.vv v8, v12, v10, v0.t @@ -1688,9 +1688,9 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v16, v12 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1718,7 +1718,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; CHECK-NEXT: vnmsac.vv v8, v16, v12, v0.t diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll @@ -11,7 +11,7 @@ define @intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define @intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -105,7 +105,7 @@ define @intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -152,7 +152,7 @@ define @intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -199,7 +199,7 @@ define @intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -246,7 +246,7 @@ define @intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -293,7 +293,7 @@ define @intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v10, v12 ; 
CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -622,7 +622,7 @@ define @intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -669,7 +669,7 @@ define @intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -716,7 +716,7 @@ define @intrinsic_vnmsac_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -763,7 +763,7 @@ define @intrinsic_vnmsac_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -810,7 +810,7 @@ define @intrinsic_vnmsac_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vnmsac.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -857,7 +857,7 @@ define @intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -904,7 +904,7 @@ define @intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -951,7 +951,7 @@ define 
@intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -998,7 +998,7 @@ define @intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1045,7 +1045,7 @@ define @intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -1092,7 +1092,7 @@ define @intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -1139,7 +1139,7 @@ define @intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1186,7 +1186,7 @@ define @intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1233,7 +1233,7 @@ define @intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1280,7 +1280,7 @@ define @intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -1327,7 +1327,7 @@ define @intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -1374,7 +1374,7 @@ define @intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1421,7 +1421,7 @@ define @intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1468,7 +1468,7 @@ define @intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -1515,7 +1515,7 @@ define @intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -1562,7 +1562,7 @@ define @intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1609,7 +1609,7 @@ define @intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -1656,7 +1656,7 @@ define @intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v12 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll @@ -11,7 +11,7 @@ define @intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define @intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -105,7 +105,7 @@ define @intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -152,7 +152,7 @@ define @intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -199,7 +199,7 @@ 
define @intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -246,7 +246,7 @@ define @intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -293,7 +293,7 @@ define @intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -622,7 +622,7 @@ define @intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -669,7 +669,7 @@ define 
@intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -716,7 +716,7 @@ define @intrinsic_vnmsub_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -763,7 +763,7 @@ define @intrinsic_vnmsub_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -810,7 +810,7 @@ define @intrinsic_vnmsub_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -857,7 +857,7 @@ define @intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -904,7 +904,7 @@ define @intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -951,7 +951,7 @@ define @intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -998,7 +998,7 @@ define @intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1045,7 +1045,7 @@ define @intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -1092,7 +1092,7 @@ define @intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -1139,7 +1139,7 @@ define @intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1186,7 +1186,7 @@ define @intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1233,7 +1233,7 @@ define @intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1280,7 +1280,7 @@ define @intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -1327,7 +1327,7 @@ define @intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -1374,7 +1374,7 @@ define @intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1421,7 +1421,7 @@ define @intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1468,7 +1468,7 @@ define @intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -1515,7 +1515,7 @@ define @intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -1566,9 +1566,9 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v10, v9 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1596,7 +1596,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; 
CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu ; CHECK-NEXT: vnmsub.vv v8, v10, v9, v0.t @@ -1627,9 +1627,9 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v12, v10 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1657,7 +1657,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, mu ; CHECK-NEXT: vnmsub.vv v8, v12, v10, v0.t @@ -1688,9 +1688,9 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v16, v12 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret @@ -1718,7 +1718,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu ; CHECK-NEXT: vnmsub.vv v8, v16, v12, v0.t diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll @@ -11,7 +11,7 @@ define @intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define @intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -105,7 +105,7 @@ define @intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -152,7 +152,7 @@ define @intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -199,7 +199,7 @@ define @intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, 
m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -246,7 +246,7 @@ define @intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -293,7 +293,7 @@ define @intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -622,7 +622,7 @@ define @intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -669,7 +669,7 @@ define @intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, 
a0, e32, m4, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -716,7 +716,7 @@ define @intrinsic_vnmsub_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -763,7 +763,7 @@ define @intrinsic_vnmsub_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v10, v12 ; CHECK-NEXT: ret entry: @@ -810,7 +810,7 @@ define @intrinsic_vnmsub_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vnmsub.vv v8, v12, v16 ; CHECK-NEXT: ret entry: @@ -857,7 +857,7 @@ define @intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -904,7 +904,7 @@ define @intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -951,7 +951,7 @@ define @intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -998,7 +998,7 @@ define @intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1045,7 +1045,7 @@ define @intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -1092,7 +1092,7 @@ define @intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -1139,7 +1139,7 @@ define @intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1186,7 +1186,7 @@ define 
@intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1233,7 +1233,7 @@ define @intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1280,7 +1280,7 @@ define @intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -1327,7 +1327,7 @@ define @intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -1374,7 +1374,7 @@ define @intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1421,7 +1421,7 @@ define @intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1468,7 +1468,7 @@ define @intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -1515,7 +1515,7 @@ define @intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -1562,7 +1562,7 @@ define @intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64( %0, i64 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1609,7 +1609,7 @@ define @intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64( %0, i64 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -1656,7 +1656,7 @@ define @intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64( %0, i64 %1, %2, i64 %3) 
nounwind { ; CHECK-LABEL: intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v12 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsub-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnmsub-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnmsub-sdnode.ll @@ -10,7 +10,7 @@ define @vnmsub_vv_nxv1i8( %va, %vb, %vc) { ; CHECK-LABEL: vnmsub_vv_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vnmsub.vv v8, v9, v10 ; CHECK-NEXT: ret %x = mul %va, %vb @@ -21,7 +21,7 @@ define @vnmsub_vx_nxv1i8( %va, %vb, i8 %c) { ; CHECK-LABEL: vnmsub_vx_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v9 ; CHECK-NEXT: ret %head = insertelement poison, i8 %c, i32 0 @@ -34,7 +34,7 @@ define @vnmsub_vv_nxv2i8( %va, %vb, %vc) { ; CHECK-LABEL: vnmsub_vv_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vnmsub.vv v8, v10, v9 ; CHECK-NEXT: ret %x = mul %va, %vc @@ -45,7 +45,7 @@ define @vnmsub_vx_nxv2i8( %va, %vb, i8 %c) { ; CHECK-LABEL: vnmsub_vx_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v9 ; CHECK-NEXT: ret %head = insertelement poison, i8 %c, i32 0 @@ -58,7 +58,7 @@ define @vnmsub_vv_nxv4i8( %va, %vb, %vc) { ; CHECK-LABEL: vnmsub_vv_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vnmsub.vv v8, v9, v10 ; CHECK-NEXT: ret %x = mul %vb, %va @@ -69,7 +69,7 @@ define @vnmsub_vx_nxv4i8( %va, %vb, i8 %c) { ; CHECK-LABEL: vnmsub_vx_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v9 ; CHECK-NEXT: ret %head = insertelement poison, i8 %c, i32 0 @@ -82,7 +82,7 @@ define @vnmsub_vv_nxv8i8( %va, %vb, %vc) { ; CHECK-LABEL: vnmsub_vv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vnmsac.vv v8, v10, v9 ; CHECK-NEXT: ret %x = mul %vb, %vc @@ -93,7 +93,7 @@ define @vnmsub_vx_nxv8i8( %va, %vb, i8 %c) { ; CHECK-LABEL: vnmsub_vx_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v9 ; CHECK-NEXT: ret %head = insertelement poison, i8 %c, i32 0 @@ -106,7 +106,7 @@ define @vnmsub_vv_nxv16i8( %va, %vb, %vc) { ; CHECK-LABEL: vnmsub_vv_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vnmsub.vv v8, v12, v10 ; CHECK-NEXT: ret %x = mul %vc, %va @@ -117,7 +117,7 @@ define @vnmsub_vx_nxv16i8( %va, %vb, i8 %c) { ; CHECK-LABEL: vnmsub_vx_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v10 ; CHECK-NEXT: ret %head = insertelement poison, i8 %c, i32 0 @@ -130,7 +130,7 @@ define @vnmsub_vv_nxv32i8( %va, %vb, %vc) { ; CHECK-LABEL: vnmsub_vv_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, 
zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vnmsac.vv v8, v16, v12 ; CHECK-NEXT: ret %x = mul %vc, %vb @@ -141,7 +141,7 @@ define @vnmsub_vx_nxv32i8( %va, %vb, i8 %c) { ; CHECK-LABEL: vnmsub_vx_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v12 ; CHECK-NEXT: ret %head = insertelement poison, i8 %c, i32 0 @@ -155,7 +155,7 @@ ; CHECK-LABEL: vnmsub_vv_nxv64i8: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8r.v v24, (a0) -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vnmsac.vv v8, v16, v24 ; CHECK-NEXT: ret %x = mul %vc, %vb @@ -166,7 +166,7 @@ define @vnmsub_vx_nxv64i8( %va, %vb, i8 %c) { ; CHECK-LABEL: vnmsub_vx_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v16 ; CHECK-NEXT: ret %head = insertelement poison, i8 %c, i32 0 @@ -179,7 +179,7 @@ define @vnmsub_vv_nxv1i16( %va, %vb, %vc) { ; CHECK-LABEL: vnmsub_vv_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vnmsub.vv v8, v9, v10 ; CHECK-NEXT: ret %x = mul %va, %vb @@ -190,7 +190,7 @@ define @vnmsub_vx_nxv1i16( %va, %vb, i16 %c) { ; CHECK-LABEL: vnmsub_vx_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v9 ; CHECK-NEXT: ret %head = insertelement poison, i16 %c, i32 0 @@ -203,7 +203,7 @@ define @vnmsub_vv_nxv2i16( %va, %vb, %vc) { ; CHECK-LABEL: vnmsub_vv_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vnmsub.vv v8, v10, v9 ; CHECK-NEXT: ret %x = mul %va, %vc @@ -214,7 +214,7 @@ define @vnmsub_vx_nxv2i16( %va, %vb, i16 %c) { ; CHECK-LABEL: vnmsub_vx_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v9 ; CHECK-NEXT: ret %head = insertelement poison, i16 %c, i32 0 @@ -227,7 +227,7 @@ define @vnmsub_vv_nxv4i16( %va, %vb, %vc) { ; CHECK-LABEL: vnmsub_vv_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vnmsub.vv v8, v9, v10 ; CHECK-NEXT: ret %x = mul %vb, %va @@ -238,7 +238,7 @@ define @vnmsub_vx_nxv4i16( %va, %vb, i16 %c) { ; CHECK-LABEL: vnmsub_vx_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v9 ; CHECK-NEXT: ret %head = insertelement poison, i16 %c, i32 0 @@ -251,7 +251,7 @@ define @vnmsub_vv_nxv8i16( %va, %vb, %vc) { ; CHECK-LABEL: vnmsub_vv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vnmsac.vv v8, v12, v10 ; CHECK-NEXT: ret %x = mul %vb, %vc @@ -262,7 +262,7 @@ define @vnmsub_vx_nxv8i16( %va, %vb, i16 %c) { ; CHECK-LABEL: vnmsub_vx_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v10 ; CHECK-NEXT: ret %head = insertelement poison, i16 %c, i32 0 @@ -275,7 +275,7 @@ define @vnmsub_vv_nxv16i16( %va, %vb, %vc) { ; CHECK-LABEL: vnmsub_vv_nxv16i16: ; CHECK: 
# %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vnmsub.vv v8, v16, v12 ; CHECK-NEXT: ret %x = mul %vc, %va @@ -286,7 +286,7 @@ define @vnmsub_vx_nxv16i16( %va, %vb, i16 %c) { ; CHECK-LABEL: vnmsub_vx_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v12 ; CHECK-NEXT: ret %head = insertelement poison, i16 %c, i32 0 @@ -300,7 +300,7 @@ ; CHECK-LABEL: vnmsub_vv_nxv32i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re16.v v24, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vnmsac.vv v8, v16, v24 ; CHECK-NEXT: ret %x = mul %vc, %vb @@ -311,7 +311,7 @@ define @vnmsub_vx_nxv32i16( %va, %vb, i16 %c) { ; CHECK-LABEL: vnmsub_vx_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v16 ; CHECK-NEXT: ret %head = insertelement poison, i16 %c, i32 0 @@ -324,7 +324,7 @@ define @vnmsub_vv_nxv1i32( %va, %vb, %vc) { ; CHECK-LABEL: vnmsub_vv_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vnmsub.vv v8, v9, v10 ; CHECK-NEXT: ret %x = mul %va, %vb @@ -335,7 +335,7 @@ define @vnmsub_vx_nxv1i32( %va, %vb, i32 %c) { ; CHECK-LABEL: vnmsub_vx_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v9 ; CHECK-NEXT: ret %head = insertelement poison, i32 %c, i32 0 @@ -348,7 +348,7 @@ define @vnmsub_vv_nxv2i32( %va, %vb, %vc) { ; CHECK-LABEL: vnmsub_vv_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vnmsub.vv v8, v10, v9 ; CHECK-NEXT: ret %x = mul %va, %vc @@ -359,7 +359,7 @@ define @vnmsub_vx_nxv2i32( %va, %vb, i32 %c) { ; CHECK-LABEL: vnmsub_vx_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v9 ; CHECK-NEXT: ret %head = insertelement poison, i32 %c, i32 0 @@ -372,7 +372,7 @@ define @vnmsub_vv_nxv4i32( %va, %vb, %vc) { ; CHECK-LABEL: vnmsub_vv_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vnmsub.vv v8, v10, v12 ; CHECK-NEXT: ret %x = mul %vb, %va @@ -383,7 +383,7 @@ define @vnmsub_vx_nxv4i32( %va, %vb, i32 %c) { ; CHECK-LABEL: vnmsub_vx_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v10 ; CHECK-NEXT: ret %head = insertelement poison, i32 %c, i32 0 @@ -396,7 +396,7 @@ define @vnmsub_vv_nxv8i32( %va, %vb, %vc) { ; CHECK-LABEL: vnmsub_vv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vnmsac.vv v8, v16, v12 ; CHECK-NEXT: ret %x = mul %vb, %vc @@ -407,7 +407,7 @@ define @vnmsub_vx_nxv8i32( %va, %vb, i32 %c) { ; CHECK-LABEL: vnmsub_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vnmsac.vx v8, a0, v12 ; CHECK-NEXT: ret %head = insertelement poison, i32 %c, i32 0 @@ -421,7 +421,7 @@ ; CHECK-LABEL: vnmsub_vv_nxv16i32: 
; CHECK: # %bb.0: ; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vnmsub.vv v8, v24, v16 ; CHECK-NEXT: ret %x = mul %vc, %va @@ -432,7 +432,7 @@ define @vnmsub_vx_nxv16i32( %va, %vb, i32 %c) { ; CHECK-LABEL: vnmsub_vx_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vnmsub.vx v8, a0, v16 ; CHECK-NEXT: ret %head = insertelement poison, i32 %c, i32 0 @@ -445,7 +445,7 @@ define @vnmsub_vv_nxv1i64( %va, %vb, %vc) { ; CHECK-LABEL: vnmsub_vv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vnmsub.vv v8, v9, v10 ; CHECK-NEXT: ret %x = mul %va, %vb @@ -461,7 +461,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vnmsub.vv v8, v10, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -469,7 +469,7 @@ ; ; RV64-LABEL: vnmsub_vx_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV64-NEXT: vnmsub.vx v8, a0, v9 ; RV64-NEXT: ret %head = insertelement poison, i64 %c, i32 0 @@ -482,7 +482,7 @@ define @vnmsub_vv_nxv2i64( %va, %vb, %vc) { ; CHECK-LABEL: vnmsub_vv_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vnmsub.vv v8, v12, v10 ; CHECK-NEXT: ret %x = mul %va, %vc @@ -498,7 +498,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vnmsac.vv v8, v10, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -506,7 +506,7 @@ ; ; RV64-LABEL: vnmsub_vx_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV64-NEXT: vnmsac.vx v8, a0, v10 ; RV64-NEXT: ret %head = insertelement poison, i64 %c, i32 0 @@ -519,7 +519,7 @@ define @vnmsub_vv_nxv4i64( %va, %vb, %vc) { ; CHECK-LABEL: vnmsub_vv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vnmsub.vv v8, v12, v16 ; CHECK-NEXT: ret %x = mul %vb, %va @@ -535,7 +535,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vnmsub.vv v8, v16, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -543,7 +543,7 @@ ; ; RV64-LABEL: vnmsub_vx_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV64-NEXT: vnmsub.vx v8, a0, v12 ; RV64-NEXT: ret %head = insertelement poison, i64 %c, i32 0 @@ -557,7 +557,7 @@ ; CHECK-LABEL: vnmsub_vv_nxv8i64: ; CHECK: # %bb.0: ; CHECK-NEXT: vl8re64.v v24, (a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vnmsac.vv v8, v16, v24 ; CHECK-NEXT: ret %x = mul %vb, %vc @@ -573,7 +573,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, 
m8, ta, ma ; RV32-NEXT: vlse64.v v24, (a0), zero ; RV32-NEXT: vnmsac.vv v8, v16, v24 ; RV32-NEXT: addi sp, sp, 16 @@ -581,7 +581,7 @@ ; ; RV64-LABEL: vnmsub_vx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vnmsac.vx v8, a0, v16 ; RV64-NEXT: ret %head = insertelement poison, i64 %c, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vnsra.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vnsra_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv2i8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vnsra.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vnsra_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv4i8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vnsra.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vnsra_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv8i8_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vnsra.wv v11, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret @@ -199,7 +199,7 @@ define @intrinsic_vnsra_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv16i8_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vnsra.wv v14, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret @@ -247,7 +247,7 @@ define @intrinsic_vnsra_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv32i8_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vnsra.wv v20, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret @@ -295,7 +295,7 @@ define @intrinsic_vnsra_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv1i16_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vnsra.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -342,7 +342,7 @@ define @intrinsic_vnsra_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv2i16_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vnsra.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -389,7 +389,7 @@ define @intrinsic_vnsra_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv4i16_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vnsra.wv v11, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret @@ -437,7 +437,7 @@ define @intrinsic_vnsra_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv8i16_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vnsra.wv v14, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret @@ -485,7 +485,7 @@ define @intrinsic_vnsra_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv16i16_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vnsra.wv v20, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret @@ -533,7 +533,7 @@ define @intrinsic_vnsra_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv1i32_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vnsra.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -580,7 +580,7 @@ define @intrinsic_vnsra_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv2i32_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vnsra.wv v11, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret @@ -628,7 +628,7 @@ define @intrinsic_vnsra_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv4i32_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vnsra.wv v14, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret @@ -676,7 +676,7 @@ define @intrinsic_vnsra_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv8i32_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vnsra.wv v20, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret @@ -724,7 +724,7 @@ define @intrinsic_vnsra_vx_nxv1i8_nxv1i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vnsra.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -771,7 +771,7 @@ define @intrinsic_vnsra_vx_nxv2i8_nxv2i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vnsra.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -818,7 +818,7 @@ define @intrinsic_vnsra_vx_nxv4i8_nxv4i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vnsra.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -865,7 +865,7 @@ define @intrinsic_vnsra_vx_nxv8i8_nxv8i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, 
e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vnsra.wx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -913,7 +913,7 @@ define @intrinsic_vnsra_vx_nxv16i8_nxv16i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vnsra.wx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -961,7 +961,7 @@ define @intrinsic_vnsra_vx_nxv32i8_nxv32i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vnsra.wx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1009,7 +1009,7 @@ define @intrinsic_vnsra_vx_nxv1i16_nxv1i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vnsra.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1056,7 +1056,7 @@ define @intrinsic_vnsra_vx_nxv2i16_nxv2i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vnsra.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1103,7 +1103,7 @@ define @intrinsic_vnsra_vx_nxv4i16_nxv4i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vnsra.wx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1151,7 +1151,7 @@ define @intrinsic_vnsra_vx_nxv8i16_nxv8i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vnsra.wx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1199,7 +1199,7 @@ define @intrinsic_vnsra_vx_nxv16i16_nxv16i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vnsra.wx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1247,7 +1247,7 @@ define @intrinsic_vnsra_vx_nxv1i32_nxv1i64( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vnsra.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1294,7 +1294,7 @@ define @intrinsic_vnsra_vx_nxv2i32_nxv2i64( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vnsra.wx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1342,7 +1342,7 @@ define @intrinsic_vnsra_vx_nxv4i32_nxv4i64( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, 
ma ; CHECK-NEXT: vnsra.wx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1390,7 +1390,7 @@ define @intrinsic_vnsra_vx_nxv8i32_nxv8i64( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vnsra.wx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1432,7 +1432,7 @@ define @intrinsic_vnsra_vi_nxv1i8_nxv1i16_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vi_nxv1i8_nxv1i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vnsra.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1465,7 +1465,7 @@ define @intrinsic_vnsra_vi_nxv2i8_nxv2i16_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vi_nxv2i8_nxv2i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vnsra.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1498,7 +1498,7 @@ define @intrinsic_vnsra_vi_nxv4i8_nxv4i16_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vi_nxv4i8_nxv4i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vnsra.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1531,7 +1531,7 @@ define @intrinsic_vnsra_vi_nxv8i8_nxv8i16_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vi_nxv8i8_nxv8i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vnsra.wi v10, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1565,7 +1565,7 @@ define @intrinsic_vnsra_vi_nxv16i8_nxv16i16_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vi_nxv16i8_nxv16i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vnsra.wi v12, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1599,7 +1599,7 @@ define @intrinsic_vnsra_vi_nxv32i8_nxv32i16_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vi_nxv32i8_nxv32i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vnsra.wi v16, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1633,7 +1633,7 @@ define @intrinsic_vnsra_vi_nxv1i16_nxv1i32_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vi_nxv1i16_nxv1i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vnsra.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1666,7 +1666,7 @@ define @intrinsic_vnsra_vi_nxv2i16_nxv2i32_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vi_nxv2i16_nxv2i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vnsra.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1699,7 +1699,7 @@ define @intrinsic_vnsra_vi_nxv4i16_nxv4i32_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vi_nxv4i16_nxv4i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vnsra.wi v10, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1733,7 +1733,7 @@ define 
@intrinsic_vnsra_vi_nxv8i16_nxv8i32_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vi_nxv8i16_nxv8i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vnsra.wi v12, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1767,7 +1767,7 @@ define @intrinsic_vnsra_vi_nxv16i16_nxv16i32_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vi_nxv16i16_nxv16i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vnsra.wi v16, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1801,7 +1801,7 @@ define @intrinsic_vnsra_vi_nxv1i32_nxv1i64_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vi_nxv1i32_nxv1i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vnsra.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1834,7 +1834,7 @@ define @intrinsic_vnsra_vi_nxv2i32_nxv2i64_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vi_nxv2i32_nxv2i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vnsra.wi v10, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1868,7 +1868,7 @@ define @intrinsic_vnsra_vi_nxv4i32_nxv4i64_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vi_nxv4i32_nxv4i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vnsra.wi v12, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1902,7 +1902,7 @@ define @intrinsic_vnsra_vi_nxv8i32_nxv8i64_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vi_nxv8i32_nxv8i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vnsra.wi v16, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vnsra.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vnsra_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv2i8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vnsra.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vnsra_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv4i8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vnsra.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vnsra_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv8i8_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, 
m1, ta, ma ; CHECK-NEXT: vnsra.wv v11, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret @@ -199,7 +199,7 @@ define @intrinsic_vnsra_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv16i8_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vnsra.wv v14, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret @@ -247,7 +247,7 @@ define @intrinsic_vnsra_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv32i8_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vnsra.wv v20, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret @@ -295,7 +295,7 @@ define @intrinsic_vnsra_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv1i16_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vnsra.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -342,7 +342,7 @@ define @intrinsic_vnsra_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv2i16_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vnsra.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -389,7 +389,7 @@ define @intrinsic_vnsra_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv4i16_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vnsra.wv v11, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret @@ -437,7 +437,7 @@ define @intrinsic_vnsra_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv8i16_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vnsra.wv v14, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret @@ -485,7 +485,7 @@ define @intrinsic_vnsra_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv16i16_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vnsra.wv v20, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret @@ -533,7 +533,7 @@ define @intrinsic_vnsra_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv1i32_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vnsra.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -580,7 +580,7 @@ define @intrinsic_vnsra_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv2i32_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vnsra.wv v11, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret @@ -628,7 +628,7 @@ define @intrinsic_vnsra_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv4i32_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, 
e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vnsra.wv v14, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret @@ -676,7 +676,7 @@ define @intrinsic_vnsra_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_wv_nxv8i32_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vnsra.wv v20, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret @@ -724,7 +724,7 @@ define @intrinsic_vnsra_vx_nxv1i8_nxv1i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vnsra.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -771,7 +771,7 @@ define @intrinsic_vnsra_vx_nxv2i8_nxv2i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vnsra.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -818,7 +818,7 @@ define @intrinsic_vnsra_vx_nxv4i8_nxv4i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vnsra.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -865,7 +865,7 @@ define @intrinsic_vnsra_vx_nxv8i8_nxv8i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vnsra.wx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -913,7 +913,7 @@ define @intrinsic_vnsra_vx_nxv16i8_nxv16i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vnsra.wx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -961,7 +961,7 @@ define @intrinsic_vnsra_vx_nxv32i8_nxv32i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vnsra.wx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1009,7 +1009,7 @@ define @intrinsic_vnsra_vx_nxv1i16_nxv1i32( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vnsra.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1056,7 +1056,7 @@ define @intrinsic_vnsra_vx_nxv2i16_nxv2i32( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vnsra.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1103,7 +1103,7 @@ define @intrinsic_vnsra_vx_nxv4i16_nxv4i32( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vnsra.wx v10, v8, a0 ; CHECK-NEXT: 
vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1151,7 +1151,7 @@ define @intrinsic_vnsra_vx_nxv8i16_nxv8i32( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vnsra.wx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1199,7 +1199,7 @@ define @intrinsic_vnsra_vx_nxv16i16_nxv16i32( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vnsra.wx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1247,7 +1247,7 @@ define @intrinsic_vnsra_vx_nxv1i32_nxv1i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vnsra.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1294,7 +1294,7 @@ define @intrinsic_vnsra_vx_nxv2i32_nxv2i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vnsra.wx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1342,7 +1342,7 @@ define @intrinsic_vnsra_vx_nxv4i32_nxv4i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vnsra.wx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1390,7 +1390,7 @@ define @intrinsic_vnsra_vx_nxv8i32_nxv8i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vnsra.wx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1432,7 +1432,7 @@ define @intrinsic_vnsra_vi_nxv1i8_nxv1i16_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vi_nxv1i8_nxv1i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vnsra.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1465,7 +1465,7 @@ define @intrinsic_vnsra_vi_nxv2i8_nxv2i16_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vi_nxv2i8_nxv2i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vnsra.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1498,7 +1498,7 @@ define @intrinsic_vnsra_vi_nxv4i8_nxv4i16_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vi_nxv4i8_nxv4i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vnsra.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1531,7 +1531,7 @@ define @intrinsic_vnsra_vi_nxv8i8_nxv8i16_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vi_nxv8i8_nxv8i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vnsra.wi v10, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1565,7 +1565,7 @@ define @intrinsic_vnsra_vi_nxv16i8_nxv16i16_i8( 
%0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vi_nxv16i8_nxv16i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vnsra.wi v12, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1599,7 +1599,7 @@ define @intrinsic_vnsra_vi_nxv32i8_nxv32i16_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vi_nxv32i8_nxv32i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vnsra.wi v16, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1633,7 +1633,7 @@ define @intrinsic_vnsra_vi_nxv1i16_nxv1i32_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vi_nxv1i16_nxv1i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vnsra.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1666,7 +1666,7 @@ define @intrinsic_vnsra_vi_nxv2i16_nxv2i32_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vi_nxv2i16_nxv2i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vnsra.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1699,7 +1699,7 @@ define @intrinsic_vnsra_vi_nxv4i16_nxv4i32_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vi_nxv4i16_nxv4i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vnsra.wi v10, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1733,7 +1733,7 @@ define @intrinsic_vnsra_vi_nxv8i16_nxv8i32_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vi_nxv8i16_nxv8i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vnsra.wi v12, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1767,7 +1767,7 @@ define @intrinsic_vnsra_vi_nxv16i16_nxv16i32_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vi_nxv16i16_nxv16i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vnsra.wi v16, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1801,7 +1801,7 @@ define @intrinsic_vnsra_vi_nxv1i32_nxv1i64_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vi_nxv1i32_nxv1i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vnsra.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1834,7 +1834,7 @@ define @intrinsic_vnsra_vi_nxv2i32_nxv2i64_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vi_nxv2i32_nxv2i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vnsra.wi v10, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1868,7 +1868,7 @@ define @intrinsic_vnsra_vi_nxv4i32_nxv4i64_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsra_vi_nxv4i32_nxv4i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vnsra.wi v12, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1902,7 +1902,7 @@ define @intrinsic_vnsra_vi_nxv8i32_nxv8i64_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: 
intrinsic_vnsra_vi_nxv8i32_nxv8i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vnsra.wi v16, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsra-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnsra-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsra-sdnode.ll @@ -7,7 +7,7 @@ define @vnsra_wv_nxv1i32_sext( %va, %vb) { ; CHECK-LABEL: vnsra_wv_nxv1i32_sext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vnsra.wv v8, v8, v9 ; CHECK-NEXT: ret %vc = sext %vb to @@ -19,7 +19,7 @@ define @vnsra_wx_i32_nxv1i32_sext( %va, i32 %b) { ; CHECK-LABEL: vnsra_wx_i32_nxv1i32_sext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vnsra.wx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -33,7 +33,7 @@ define @vnsra_wi_i32_nxv1i32_sext( %va) { ; CHECK-LABEL: vnsra_wi_i32_nxv1i32_sext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vnsra.wi v8, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i32 15, i32 0 @@ -47,7 +47,7 @@ define @vnsra_wv_nxv2i32_sext( %va, %vb) { ; CHECK-LABEL: vnsra_wv_nxv2i32_sext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vnsra.wv v11, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret @@ -60,7 +60,7 @@ define @vnsra_wx_i32_nxv2i32_sext( %va, i32 %b) { ; CHECK-LABEL: vnsra_wx_i32_nxv2i32_sext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vnsra.wx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -75,7 +75,7 @@ define @vnsra_wi_i32_nxv2i32_sext( %va) { ; CHECK-LABEL: vnsra_wi_i32_nxv2i32_sext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vnsra.wi v10, v8, 15 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -90,7 +90,7 @@ define @vnsra_wv_nxv4i32_sext( %va, %vb) { ; CHECK-LABEL: vnsra_wv_nxv4i32_sext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vnsra.wv v14, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret @@ -103,7 +103,7 @@ define @vnsra_wx_i32_nxv4i32_sext( %va, i32 %b) { ; CHECK-LABEL: vnsra_wx_i32_nxv4i32_sext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vnsra.wx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -118,7 +118,7 @@ define @vnsra_wi_i32_nxv4i32_sext( %va) { ; CHECK-LABEL: vnsra_wi_i32_nxv4i32_sext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vnsra.wi v12, v8, 15 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -133,7 +133,7 @@ define @vnsra_wv_nxv8i32_sext( %va, %vb) { ; CHECK-LABEL: vnsra_wv_nxv8i32_sext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vnsra.wv v20, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret @@ -146,7 +146,7 @@ define @vnsra_wx_i32_nxv8i32_sext( 
%va, i32 %b) { ; CHECK-LABEL: vnsra_wx_i32_nxv8i32_sext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vnsra.wx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -161,7 +161,7 @@ define @vnsra_wi_i32_nxv8i32_sext( %va) { ; CHECK-LABEL: vnsra_wi_i32_nxv8i32_sext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vnsra.wi v16, v8, 15 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -176,7 +176,7 @@ define @vnsra_wv_nxv1i32_zext( %va, %vb) { ; CHECK-LABEL: vnsra_wv_nxv1i32_zext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vnsra.wv v8, v8, v9 ; CHECK-NEXT: ret %vc = zext %vb to @@ -188,7 +188,7 @@ define @vnsra_wx_i32_nxv1i32_zext( %va, i32 %b) { ; CHECK-LABEL: vnsra_wx_i32_nxv1i32_zext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vnsra.wx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -202,7 +202,7 @@ define @vnsra_wi_i32_nxv1i32_zext( %va) { ; CHECK-LABEL: vnsra_wi_i32_nxv1i32_zext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vnsra.wi v8, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i32 15, i32 0 @@ -216,7 +216,7 @@ define @vnsra_wv_nxv2i32_zext( %va, %vb) { ; CHECK-LABEL: vnsra_wv_nxv2i32_zext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vnsra.wv v11, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret @@ -229,7 +229,7 @@ define @vnsra_wx_i32_nxv2i32_zext( %va, i32 %b) { ; CHECK-LABEL: vnsra_wx_i32_nxv2i32_zext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vnsra.wx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -244,7 +244,7 @@ define @vnsra_wi_i32_nxv2i32_zext( %va) { ; CHECK-LABEL: vnsra_wi_i32_nxv2i32_zext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vnsra.wi v10, v8, 15 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -259,7 +259,7 @@ define @vnsra_wv_nxv4i32_zext( %va, %vb) { ; CHECK-LABEL: vnsra_wv_nxv4i32_zext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vnsra.wv v14, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret @@ -272,7 +272,7 @@ define @vnsra_wx_i32_nxv4i32_zext( %va, i32 %b) { ; CHECK-LABEL: vnsra_wx_i32_nxv4i32_zext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vnsra.wx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -287,7 +287,7 @@ define @vnsra_wi_i32_nxv4i32_zext( %va) { ; CHECK-LABEL: vnsra_wi_i32_nxv4i32_zext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vnsra.wi v12, v8, 15 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -302,7 +302,7 @@ define @vnsra_wv_nxv8i32_zext( %va, %vb) { ; CHECK-LABEL: vnsra_wv_nxv8i32_zext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: 
vnsra.wv v20, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret @@ -315,7 +315,7 @@ define @vnsra_wx_i32_nxv8i32_zext( %va, i32 %b) { ; CHECK-LABEL: vnsra_wx_i32_nxv8i32_zext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vnsra.wx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -330,7 +330,7 @@ define @vnsra_wi_i32_nxv8i32_zext( %va) { ; CHECK-LABEL: vnsra_wi_i32_nxv8i32_zext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vnsra.wi v16, v8, 15 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vnsrl.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vnsrl_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv2i8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vnsrl_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv4i8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vnsrl.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vnsrl_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv8i8_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vnsrl.wv v11, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret @@ -199,7 +199,7 @@ define @intrinsic_vnsrl_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv16i8_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vnsrl.wv v14, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret @@ -247,7 +247,7 @@ define @intrinsic_vnsrl_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv32i8_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vnsrl.wv v20, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret @@ -295,7 +295,7 @@ define @intrinsic_vnsrl_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv1i16_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vnsrl.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -342,7 +342,7 @@ define @intrinsic_vnsrl_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv2i16_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, 
mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -389,7 +389,7 @@ define @intrinsic_vnsrl_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv4i16_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vnsrl.wv v11, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret @@ -437,7 +437,7 @@ define @intrinsic_vnsrl_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv8i16_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vnsrl.wv v14, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret @@ -485,7 +485,7 @@ define @intrinsic_vnsrl_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv16i16_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vnsrl.wv v20, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret @@ -533,7 +533,7 @@ define @intrinsic_vnsrl_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv1i32_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vnsrl.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -580,7 +580,7 @@ define @intrinsic_vnsrl_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv2i32_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vnsrl.wv v11, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret @@ -628,7 +628,7 @@ define @intrinsic_vnsrl_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv4i32_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vnsrl.wv v14, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret @@ -676,7 +676,7 @@ define @intrinsic_vnsrl_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv8i32_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vnsrl.wv v20, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret @@ -724,7 +724,7 @@ define @intrinsic_vnsrl_vx_nxv1i8_nxv1i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vnsrl.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -771,7 +771,7 @@ define @intrinsic_vnsrl_vx_nxv2i8_nxv2i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -818,7 +818,7 @@ define @intrinsic_vnsrl_vx_nxv4i8_nxv4i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; 
CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vnsrl.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -865,7 +865,7 @@ define @intrinsic_vnsrl_vx_nxv8i8_nxv8i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vnsrl.wx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -913,7 +913,7 @@ define @intrinsic_vnsrl_vx_nxv16i8_nxv16i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vnsrl.wx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -961,7 +961,7 @@ define @intrinsic_vnsrl_vx_nxv32i8_nxv32i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vnsrl.wx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1009,7 +1009,7 @@ define @intrinsic_vnsrl_vx_nxv1i16_nxv1i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vnsrl.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1056,7 +1056,7 @@ define @intrinsic_vnsrl_vx_nxv2i16_nxv2i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1103,7 +1103,7 @@ define @intrinsic_vnsrl_vx_nxv4i16_nxv4i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vnsrl.wx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1151,7 +1151,7 @@ define @intrinsic_vnsrl_vx_nxv8i16_nxv8i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vnsrl.wx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1199,7 +1199,7 @@ define @intrinsic_vnsrl_vx_nxv16i16_nxv16i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vnsrl.wx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1247,7 +1247,7 @@ define @intrinsic_vnsrl_vx_nxv1i32_nxv1i64( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vnsrl.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1294,7 +1294,7 @@ define @intrinsic_vnsrl_vx_nxv2i32_nxv2i64( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vnsrl.wx v10, v8, a0 ; 
CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1342,7 +1342,7 @@ define @intrinsic_vnsrl_vx_nxv4i32_nxv4i64( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vnsrl.wx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1390,7 +1390,7 @@ define @intrinsic_vnsrl_vx_nxv8i32_nxv8i64( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vnsrl.wx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1432,7 +1432,7 @@ define @intrinsic_vnsrl_vi_nxv1i8_nxv1i16_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vi_nxv1i8_nxv1i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1465,7 +1465,7 @@ define @intrinsic_vnsrl_vi_nxv2i8_nxv2i16_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vi_nxv2i8_nxv2i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1498,7 +1498,7 @@ define @intrinsic_vnsrl_vi_nxv4i8_nxv4i16_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vi_nxv4i8_nxv4i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1531,7 +1531,7 @@ define @intrinsic_vnsrl_vi_nxv8i8_nxv8i16_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vi_nxv8i8_nxv8i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v10, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1565,7 +1565,7 @@ define @intrinsic_vnsrl_vi_nxv16i8_nxv16i16_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vi_nxv16i8_nxv16i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vnsrl.wi v12, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1599,7 +1599,7 @@ define @intrinsic_vnsrl_vi_nxv32i8_nxv32i16_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vi_nxv32i8_nxv32i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vnsrl.wi v16, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1633,7 +1633,7 @@ define @intrinsic_vnsrl_vi_nxv1i16_nxv1i32_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vi_nxv1i16_nxv1i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1666,7 +1666,7 @@ define @intrinsic_vnsrl_vi_nxv2i16_nxv2i32_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vi_nxv2i16_nxv2i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1699,7 +1699,7 @@ define @intrinsic_vnsrl_vi_nxv4i16_nxv4i32_i16( %0, i32 %1) nounwind { ; 
CHECK-LABEL: intrinsic_vnsrl_vi_nxv4i16_nxv4i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v10, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1733,7 +1733,7 @@ define @intrinsic_vnsrl_vi_nxv8i16_nxv8i32_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vi_nxv8i16_nxv8i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vnsrl.wi v12, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1767,7 +1767,7 @@ define @intrinsic_vnsrl_vi_nxv16i16_nxv16i32_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vi_nxv16i16_nxv16i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vnsrl.wi v16, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1801,7 +1801,7 @@ define @intrinsic_vnsrl_vi_nxv1i32_nxv1i64_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vi_nxv1i32_nxv1i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1834,7 +1834,7 @@ define @intrinsic_vnsrl_vi_nxv2i32_nxv2i64_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vi_nxv2i32_nxv2i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v10, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1868,7 +1868,7 @@ define @intrinsic_vnsrl_vi_nxv4i32_nxv4i64_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vi_nxv4i32_nxv4i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vnsrl.wi v12, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1902,7 +1902,7 @@ define @intrinsic_vnsrl_vi_nxv8i32_nxv8i64_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vi_nxv8i32_nxv8i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vnsrl.wi v16, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv1i8_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vnsrl.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vnsrl_wv_nxv2i8_nxv2i16_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv2i8_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vnsrl_wv_nxv4i8_nxv4i16_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv4i8_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vnsrl.wv v8, v8, v9 ; 
CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vnsrl_wv_nxv8i8_nxv8i16_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv8i8_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vnsrl.wv v11, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret @@ -199,7 +199,7 @@ define @intrinsic_vnsrl_wv_nxv16i8_nxv16i16_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv16i8_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vnsrl.wv v14, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret @@ -247,7 +247,7 @@ define @intrinsic_vnsrl_wv_nxv32i8_nxv32i16_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv32i8_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vnsrl.wv v20, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret @@ -295,7 +295,7 @@ define @intrinsic_vnsrl_wv_nxv1i16_nxv1i32_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv1i16_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vnsrl.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -342,7 +342,7 @@ define @intrinsic_vnsrl_wv_nxv2i16_nxv2i32_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv2i16_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -389,7 +389,7 @@ define @intrinsic_vnsrl_wv_nxv4i16_nxv4i32_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv4i16_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vnsrl.wv v11, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret @@ -437,7 +437,7 @@ define @intrinsic_vnsrl_wv_nxv8i16_nxv8i32_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv8i16_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vnsrl.wv v14, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret @@ -485,7 +485,7 @@ define @intrinsic_vnsrl_wv_nxv16i16_nxv16i32_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv16i16_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vnsrl.wv v20, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret @@ -533,7 +533,7 @@ define @intrinsic_vnsrl_wv_nxv1i32_nxv1i64_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv1i32_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vnsrl.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -580,7 +580,7 @@ define @intrinsic_vnsrl_wv_nxv2i32_nxv2i64_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv2i32_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: 
vnsrl.wv v11, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret @@ -628,7 +628,7 @@ define @intrinsic_vnsrl_wv_nxv4i32_nxv4i64_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv4i32_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vnsrl.wv v14, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret @@ -676,7 +676,7 @@ define @intrinsic_vnsrl_wv_nxv8i32_nxv8i64_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_wv_nxv8i32_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vnsrl.wv v20, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret @@ -724,7 +724,7 @@ define @intrinsic_vnsrl_vx_nxv1i8_nxv1i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vnsrl.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -771,7 +771,7 @@ define @intrinsic_vnsrl_vx_nxv2i8_nxv2i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -818,7 +818,7 @@ define @intrinsic_vnsrl_vx_nxv4i8_nxv4i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vnsrl.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -865,7 +865,7 @@ define @intrinsic_vnsrl_vx_nxv8i8_nxv8i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vnsrl.wx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -913,7 +913,7 @@ define @intrinsic_vnsrl_vx_nxv16i8_nxv16i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vnsrl.wx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -961,7 +961,7 @@ define @intrinsic_vnsrl_vx_nxv32i8_nxv32i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vnsrl.wx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1009,7 +1009,7 @@ define @intrinsic_vnsrl_vx_nxv1i16_nxv1i32( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vnsrl.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1056,7 +1056,7 @@ define @intrinsic_vnsrl_vx_nxv2i16_nxv2i32( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1103,7 +1103,7 @@ define 
@intrinsic_vnsrl_vx_nxv4i16_nxv4i32( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vnsrl.wx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1151,7 +1151,7 @@ define @intrinsic_vnsrl_vx_nxv8i16_nxv8i32( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vnsrl.wx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1199,7 +1199,7 @@ define @intrinsic_vnsrl_vx_nxv16i16_nxv16i32( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vnsrl.wx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1247,7 +1247,7 @@ define @intrinsic_vnsrl_vx_nxv1i32_nxv1i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vnsrl.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1294,7 +1294,7 @@ define @intrinsic_vnsrl_vx_nxv2i32_nxv2i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vnsrl.wx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1342,7 +1342,7 @@ define @intrinsic_vnsrl_vx_nxv4i32_nxv4i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vnsrl.wx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1390,7 +1390,7 @@ define @intrinsic_vnsrl_vx_nxv8i32_nxv8i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vx_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vnsrl.wx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1432,7 +1432,7 @@ define @intrinsic_vnsrl_vi_nxv1i8_nxv1i16_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vi_nxv1i8_nxv1i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1465,7 +1465,7 @@ define @intrinsic_vnsrl_vi_nxv2i8_nxv2i16_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vi_nxv2i8_nxv2i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1498,7 +1498,7 @@ define @intrinsic_vnsrl_vi_nxv4i8_nxv4i16_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vi_nxv4i8_nxv4i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1531,7 +1531,7 @@ define @intrinsic_vnsrl_vi_nxv8i8_nxv8i16_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: 
intrinsic_vnsrl_vi_nxv8i8_nxv8i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v10, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1565,7 +1565,7 @@ define @intrinsic_vnsrl_vi_nxv16i8_nxv16i16_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vi_nxv16i8_nxv16i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vnsrl.wi v12, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1599,7 +1599,7 @@ define @intrinsic_vnsrl_vi_nxv32i8_nxv32i16_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vi_nxv32i8_nxv32i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vnsrl.wi v16, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1633,7 +1633,7 @@ define @intrinsic_vnsrl_vi_nxv1i16_nxv1i32_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vi_nxv1i16_nxv1i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1666,7 +1666,7 @@ define @intrinsic_vnsrl_vi_nxv2i16_nxv2i32_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vi_nxv2i16_nxv2i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1699,7 +1699,7 @@ define @intrinsic_vnsrl_vi_nxv4i16_nxv4i32_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vi_nxv4i16_nxv4i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v10, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1733,7 +1733,7 @@ define @intrinsic_vnsrl_vi_nxv8i16_nxv8i32_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vi_nxv8i16_nxv8i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vnsrl.wi v12, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1767,7 +1767,7 @@ define @intrinsic_vnsrl_vi_nxv16i16_nxv16i32_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vi_nxv16i16_nxv16i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vnsrl.wi v16, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1801,7 +1801,7 @@ define @intrinsic_vnsrl_vi_nxv1i32_nxv1i64_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vi_nxv1i32_nxv1i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1834,7 +1834,7 @@ define @intrinsic_vnsrl_vi_nxv2i32_nxv2i64_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vi_nxv2i32_nxv2i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v10, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1868,7 +1868,7 @@ define @intrinsic_vnsrl_vi_nxv4i32_nxv4i64_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vi_nxv4i32_nxv4i64_i32: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vnsrl.wi v12, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1902,7 +1902,7 @@ define @intrinsic_vnsrl_vi_nxv8i32_nxv8i64_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vnsrl_vi_nxv8i32_nxv8i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vnsrl.wi v16, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll @@ -7,7 +7,7 @@ define @vnsrl_wv_nxv1i32_sext( %va, %vb) { ; CHECK-LABEL: vnsrl_wv_nxv1i32_sext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vnsrl.wv v8, v8, v9 ; CHECK-NEXT: ret %vc = sext %vb to @@ -19,7 +19,7 @@ define @vnsrl_wx_i32_nxv1i32_sext( %va, i32 %b) { ; CHECK-LABEL: vnsrl_wx_i32_nxv1i32_sext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vnsrl.wx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -33,7 +33,7 @@ define @vnsrl_wi_i32_nxv1i32_sext( %va) { ; CHECK-LABEL: vnsrl_wi_i32_nxv1i32_sext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i32 15, i32 0 @@ -47,7 +47,7 @@ define @vnsrl_wv_nxv2i32_sext( %va, %vb) { ; CHECK-LABEL: vnsrl_wv_nxv2i32_sext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vnsrl.wv v11, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret @@ -60,7 +60,7 @@ define @vnsrl_wx_i32_nxv2i32_sext( %va, i32 %b) { ; CHECK-LABEL: vnsrl_wx_i32_nxv2i32_sext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vnsrl.wx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -75,7 +75,7 @@ define @vnsrl_wi_i32_nxv2i32_sext( %va) { ; CHECK-LABEL: vnsrl_wi_i32_nxv2i32_sext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v10, v8, 15 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -90,7 +90,7 @@ define @vnsrl_wv_nxv4i32_sext( %va, %vb) { ; CHECK-LABEL: vnsrl_wv_nxv4i32_sext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vnsrl.wv v14, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret @@ -103,7 +103,7 @@ define @vnsrl_wx_i32_nxv4i32_sext( %va, i32 %b) { ; CHECK-LABEL: vnsrl_wx_i32_nxv4i32_sext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vnsrl.wx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -118,7 +118,7 @@ define @vnsrl_wi_i32_nxv4i32_sext( %va) { ; CHECK-LABEL: vnsrl_wi_i32_nxv4i32_sext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vnsrl.wi v12, v8, 15 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -133,7 +133,7 @@ define @vnsrl_wv_nxv8i32_sext( %va, %vb) { ; 
CHECK-LABEL: vnsrl_wv_nxv8i32_sext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vnsrl.wv v20, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret @@ -146,7 +146,7 @@ define @vnsrl_wx_i32_nxv8i32_sext( %va, i32 %b) { ; CHECK-LABEL: vnsrl_wx_i32_nxv8i32_sext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vnsrl.wx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -161,7 +161,7 @@ define @vnsrl_wi_i32_nxv8i32_sext( %va) { ; CHECK-LABEL: vnsrl_wi_i32_nxv8i32_sext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vnsrl.wi v16, v8, 15 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -176,7 +176,7 @@ define @vnsrl_wv_nxv1i32_zext( %va, %vb) { ; CHECK-LABEL: vnsrl_wv_nxv1i32_zext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vnsrl.wv v8, v8, v9 ; CHECK-NEXT: ret %vc = zext %vb to @@ -188,7 +188,7 @@ define @vnsrl_wx_i32_nxv1i32_zext( %va, i32 %b) { ; CHECK-LABEL: vnsrl_wx_i32_nxv1i32_zext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vnsrl.wx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -202,7 +202,7 @@ define @vnsrl_wi_i32_nxv1i32_zext( %va) { ; CHECK-LABEL: vnsrl_wi_i32_nxv1i32_zext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i32 15, i32 0 @@ -216,7 +216,7 @@ define @vnsrl_wv_nxv2i32_zext( %va, %vb) { ; CHECK-LABEL: vnsrl_wv_nxv2i32_zext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vnsrl.wv v11, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret @@ -229,7 +229,7 @@ define @vnsrl_wx_i32_nxv2i32_zext( %va, i32 %b) { ; CHECK-LABEL: vnsrl_wx_i32_nxv2i32_zext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vnsrl.wx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -244,7 +244,7 @@ define @vnsrl_wi_i32_nxv2i32_zext( %va) { ; CHECK-LABEL: vnsrl_wi_i32_nxv2i32_zext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v10, v8, 15 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -259,7 +259,7 @@ define @vnsrl_wv_nxv4i32_zext( %va, %vb) { ; CHECK-LABEL: vnsrl_wv_nxv4i32_zext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vnsrl.wv v14, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret @@ -272,7 +272,7 @@ define @vnsrl_wx_i32_nxv4i32_zext( %va, i32 %b) { ; CHECK-LABEL: vnsrl_wx_i32_nxv4i32_zext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vnsrl.wx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -287,7 +287,7 @@ define @vnsrl_wi_i32_nxv4i32_zext( %va) { ; CHECK-LABEL: vnsrl_wi_i32_nxv4i32_zext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vnsrl.wi 
v12, v8, 15 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -302,7 +302,7 @@ define @vnsrl_wv_nxv8i32_zext( %va, %vb) { ; CHECK-LABEL: vnsrl_wv_nxv8i32_zext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vnsrl.wv v20, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret @@ -315,7 +315,7 @@ define @vnsrl_wx_i32_nxv8i32_zext( %va, i32 %b) { ; CHECK-LABEL: vnsrl_wx_i32_nxv8i32_zext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vnsrl.wx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -330,7 +330,7 @@ define @vnsrl_wi_i32_nxv8i32_zext( %va) { ; CHECK-LABEL: vnsrl_wi_i32_nxv8i32_zext: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vnsrl.wi v16, v8, 15 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vor_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vor_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vor_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vor_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vor_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vor_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vor_vv_nxv1i16_nxv1i16_nxv1i16( %0, 
%1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vor_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vor_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vor_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vor_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vor_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vor_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vor_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vor_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vor_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vor_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; 
CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vor_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vor_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vor_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vor_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vor_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1095,7 +1095,7 @@ define @intrinsic_vor_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vor_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vor_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vor_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define @intrinsic_vor_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vor_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vor_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vor_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vor_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vor_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define @intrinsic_vor_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vor_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vor_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vor_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vor_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ define @intrinsic_vor_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ define @intrinsic_vor_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; 
CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vor_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1898,7 +1898,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1957,7 +1957,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vor.vv v8, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -2016,7 +2016,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vor.vv v8, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -2075,7 +2075,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vor.vv v8, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 @@ -2124,7 +2124,7 @@ define @intrinsic_vor_vi_nxv1i8_nxv1i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2157,7 +2157,7 @@ define @intrinsic_vor_vi_nxv2i8_nxv2i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2190,7 +2190,7 @@ define @intrinsic_vor_vi_nxv4i8_nxv4i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2223,7 +2223,7 @@ define @intrinsic_vor_vi_nxv8i8_nxv8i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2256,7 +2256,7 @@ define @intrinsic_vor_vi_nxv16i8_nxv16i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2289,7 +2289,7 @@ define @intrinsic_vor_vi_nxv32i8_nxv32i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2322,7 +2322,7 @@ define 
@intrinsic_vor_vi_nxv64i8_nxv64i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2355,7 +2355,7 @@ define @intrinsic_vor_vi_nxv1i16_nxv1i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2388,7 +2388,7 @@ define @intrinsic_vor_vi_nxv2i16_nxv2i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2421,7 +2421,7 @@ define @intrinsic_vor_vi_nxv4i16_nxv4i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2454,7 +2454,7 @@ define @intrinsic_vor_vi_nxv8i16_nxv8i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2487,7 +2487,7 @@ define @intrinsic_vor_vi_nxv16i16_nxv16i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2520,7 +2520,7 @@ define @intrinsic_vor_vi_nxv32i16_nxv32i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2553,7 +2553,7 @@ define @intrinsic_vor_vi_nxv1i32_nxv1i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2586,7 +2586,7 @@ define @intrinsic_vor_vi_nxv2i32_nxv2i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2619,7 +2619,7 @@ define @intrinsic_vor_vi_nxv4i32_nxv4i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2652,7 +2652,7 @@ define @intrinsic_vor_vi_nxv8i32_nxv8i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret 
entry: @@ -2685,7 +2685,7 @@ define @intrinsic_vor_vi_nxv16i32_nxv16i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2718,7 +2718,7 @@ define @intrinsic_vor_vi_nxv1i64_nxv1i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2751,7 +2751,7 @@ define @intrinsic_vor_vi_nxv2i64_nxv2i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2784,7 +2784,7 @@ define @intrinsic_vor_vi_nxv4i64_nxv4i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2817,7 +2817,7 @@ define @intrinsic_vor_vi_nxv8i64_nxv8i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vor_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vor_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vor_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vor_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vor_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 
%2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vor_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vor_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vor_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vor_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vor_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vor_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vor_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vor_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vor_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vor_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli 
zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vor_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vor_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vor_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vor_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vor_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vor_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vor_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1095,7 +1095,7 @@ define @intrinsic_vor_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vor_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vor_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vor_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vor_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define @intrinsic_vor_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vor_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vor_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vor_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vor_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define @intrinsic_vor_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vor_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vor_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vor_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vor_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vor_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret 
entry:
@@ -1753,7 +1753,7 @@
define @intrinsic_vor_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
@@ -1800,7 +1800,7 @@
define @intrinsic_vor_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
@@ -1847,7 +1847,7 @@
define @intrinsic_vor_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
@@ -1894,7 +1894,7 @@
define @intrinsic_vor_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vx_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
@@ -1941,7 +1941,7 @@
define @intrinsic_vor_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vx_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
@@ -1988,7 +1988,7 @@
define @intrinsic_vor_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vx_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
@@ -2035,7 +2035,7 @@
define @intrinsic_vor_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vor_vx_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0
; CHECK-NEXT: ret
entry:
@@ -2076,7 +2076,7 @@
define @intrinsic_vor_vi_nxv1i8_nxv1i8_i8( %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vor_vi_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
@@ -2109,7 +2109,7 @@
define @intrinsic_vor_vi_nxv2i8_nxv2i8_i8( %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vor_vi_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
@@ -2142,7 +2142,7 @@
define @intrinsic_vor_vi_nxv4i8_nxv4i8_i8( %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vor_vi_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 9
; CHECK-NEXT: ret
entry:
@@ -2175,7 +2175,7 @@
define @intrinsic_vor_vi_nxv8i8_nxv8i8_i8( %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vor_vi_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0,
e8, m1, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2208,7 +2208,7 @@ define @intrinsic_vor_vi_nxv16i8_nxv16i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2241,7 +2241,7 @@ define @intrinsic_vor_vi_nxv32i8_nxv32i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2274,7 +2274,7 @@ define @intrinsic_vor_vi_nxv64i8_nxv64i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2307,7 +2307,7 @@ define @intrinsic_vor_vi_nxv1i16_nxv1i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2340,7 +2340,7 @@ define @intrinsic_vor_vi_nxv2i16_nxv2i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2373,7 +2373,7 @@ define @intrinsic_vor_vi_nxv4i16_nxv4i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2406,7 +2406,7 @@ define @intrinsic_vor_vi_nxv8i16_nxv8i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2439,7 +2439,7 @@ define @intrinsic_vor_vi_nxv16i16_nxv16i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2472,7 +2472,7 @@ define @intrinsic_vor_vi_nxv32i16_nxv32i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2505,7 +2505,7 @@ define @intrinsic_vor_vi_nxv1i32_nxv1i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2538,7 +2538,7 @@ define @intrinsic_vor_vi_nxv2i32_nxv2i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; 
CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2571,7 +2571,7 @@ define @intrinsic_vor_vi_nxv4i32_nxv4i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2604,7 +2604,7 @@ define @intrinsic_vor_vi_nxv8i32_nxv8i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2637,7 +2637,7 @@ define @intrinsic_vor_vi_nxv16i32_nxv16i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2670,7 +2670,7 @@ define @intrinsic_vor_vi_nxv1i64_nxv1i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2703,7 +2703,7 @@ define @intrinsic_vor_vi_nxv2i64_nxv2i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2736,7 +2736,7 @@ define @intrinsic_vor_vi_nxv4i64_nxv4i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2769,7 +2769,7 @@ define @intrinsic_vor_vi_nxv8i64_nxv8i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vor_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vor-sdnode.ll @@ -5,7 +5,7 @@ define @vor_vx_nxv1i8( %va, i8 signext %b) { ; CHECK-LABEL: vor_vx_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -17,7 +17,7 @@ define @vor_vx_nxv1i8_0( %va) { ; CHECK-LABEL: vor_vx_nxv1i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vor.vi v8, v8, -12 ; CHECK-NEXT: ret %head = insertelement poison, i8 -12, i32 0 @@ -29,7 +29,7 @@ define @vor_vx_nxv1i8_1( %va) { ; CHECK-LABEL: vor_vx_nxv1i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i8 15, i32 0 @@ -42,7 +42,7 @@ ; CHECK-LABEL: vor_vx_nxv1i8_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; 
CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 16, i32 0 @@ -54,7 +54,7 @@ define @vor_vx_nxv2i8( %va, i8 signext %b) { ; CHECK-LABEL: vor_vx_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -66,7 +66,7 @@ define @vor_vx_nxv2i8_0( %va) { ; CHECK-LABEL: vor_vx_nxv2i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vor.vi v8, v8, -12 ; CHECK-NEXT: ret %head = insertelement poison, i8 -12, i32 0 @@ -78,7 +78,7 @@ define @vor_vx_nxv2i8_1( %va) { ; CHECK-LABEL: vor_vx_nxv2i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i8 15, i32 0 @@ -91,7 +91,7 @@ ; CHECK-LABEL: vor_vx_nxv2i8_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 16, i32 0 @@ -103,7 +103,7 @@ define @vor_vx_nxv4i8( %va, i8 signext %b) { ; CHECK-LABEL: vor_vx_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -115,7 +115,7 @@ define @vor_vx_nxv4i8_0( %va) { ; CHECK-LABEL: vor_vx_nxv4i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, -12 ; CHECK-NEXT: ret %head = insertelement poison, i8 -12, i32 0 @@ -127,7 +127,7 @@ define @vor_vx_nxv4i8_1( %va) { ; CHECK-LABEL: vor_vx_nxv4i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i8 15, i32 0 @@ -140,7 +140,7 @@ ; CHECK-LABEL: vor_vx_nxv4i8_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 16, i32 0 @@ -152,7 +152,7 @@ define @vor_vx_nxv8i8( %va, i8 signext %b) { ; CHECK-LABEL: vor_vx_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -164,7 +164,7 @@ define @vor_vx_nxv8i8_0( %va) { ; CHECK-LABEL: vor_vx_nxv8i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vor.vi v8, v8, -12 ; CHECK-NEXT: ret %head = insertelement poison, i8 -12, i32 0 @@ -176,7 +176,7 @@ define @vor_vx_nxv8i8_1( %va) { ; CHECK-LABEL: vor_vx_nxv8i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i8 15, i32 0 @@ -189,7 +189,7 @@ ; CHECK-LABEL: vor_vx_nxv8i8_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, 
ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 16, i32 0 @@ -201,7 +201,7 @@ define @vor_vx_nxv16i8( %va, i8 signext %b) { ; CHECK-LABEL: vor_vx_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -213,7 +213,7 @@ define @vor_vx_nxv16i8_0( %va) { ; CHECK-LABEL: vor_vx_nxv16i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, -12 ; CHECK-NEXT: ret %head = insertelement poison, i8 -12, i32 0 @@ -225,7 +225,7 @@ define @vor_vx_nxv16i8_1( %va) { ; CHECK-LABEL: vor_vx_nxv16i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i8 15, i32 0 @@ -238,7 +238,7 @@ ; CHECK-LABEL: vor_vx_nxv16i8_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 16, i32 0 @@ -250,7 +250,7 @@ define @vor_vx_nxv32i8( %va, i8 signext %b) { ; CHECK-LABEL: vor_vx_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -262,7 +262,7 @@ define @vor_vx_nxv32i8_0( %va) { ; CHECK-LABEL: vor_vx_nxv32i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vor.vi v8, v8, -12 ; CHECK-NEXT: ret %head = insertelement poison, i8 -12, i32 0 @@ -274,7 +274,7 @@ define @vor_vx_nxv32i8_1( %va) { ; CHECK-LABEL: vor_vx_nxv32i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i8 15, i32 0 @@ -287,7 +287,7 @@ ; CHECK-LABEL: vor_vx_nxv32i8_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 16, i32 0 @@ -299,7 +299,7 @@ define @vor_vx_nxv64i8( %va, i8 signext %b) { ; CHECK-LABEL: vor_vx_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -311,7 +311,7 @@ define @vor_vx_nxv64i8_0( %va) { ; CHECK-LABEL: vor_vx_nxv64i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vor.vi v8, v8, -12 ; CHECK-NEXT: ret %head = insertelement poison, i8 -12, i32 0 @@ -323,7 +323,7 @@ define @vor_vx_nxv64i8_1( %va) { ; CHECK-LABEL: vor_vx_nxv64i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i8 15, i32 0 @@ -336,7 +336,7 @@ ; CHECK-LABEL: vor_vx_nxv64i8_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: 
vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 16, i32 0 @@ -348,7 +348,7 @@ define @vor_vx_nxv1i16( %va, i16 signext %b) { ; CHECK-LABEL: vor_vx_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -360,7 +360,7 @@ define @vor_vx_nxv1i16_0( %va) { ; CHECK-LABEL: vor_vx_nxv1i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vor.vi v8, v8, -12 ; CHECK-NEXT: ret %head = insertelement poison, i16 -12, i32 0 @@ -372,7 +372,7 @@ define @vor_vx_nxv1i16_1( %va) { ; CHECK-LABEL: vor_vx_nxv1i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i16 15, i32 0 @@ -385,7 +385,7 @@ ; CHECK-LABEL: vor_vx_nxv1i16_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 16, i32 0 @@ -397,7 +397,7 @@ define @vor_vx_nxv2i16( %va, i16 signext %b) { ; CHECK-LABEL: vor_vx_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -409,7 +409,7 @@ define @vor_vx_nxv2i16_0( %va) { ; CHECK-LABEL: vor_vx_nxv2i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, -12 ; CHECK-NEXT: ret %head = insertelement poison, i16 -12, i32 0 @@ -421,7 +421,7 @@ define @vor_vx_nxv2i16_1( %va) { ; CHECK-LABEL: vor_vx_nxv2i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i16 15, i32 0 @@ -434,7 +434,7 @@ ; CHECK-LABEL: vor_vx_nxv2i16_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 16, i32 0 @@ -446,7 +446,7 @@ define @vor_vx_nxv4i16( %va, i16 signext %b) { ; CHECK-LABEL: vor_vx_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -458,7 +458,7 @@ define @vor_vx_nxv4i16_0( %va) { ; CHECK-LABEL: vor_vx_nxv4i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vor.vi v8, v8, -12 ; CHECK-NEXT: ret %head = insertelement poison, i16 -12, i32 0 @@ -470,7 +470,7 @@ define @vor_vx_nxv4i16_1( %va) { ; CHECK-LABEL: vor_vx_nxv4i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i16 15, i32 0 @@ -483,7 +483,7 @@ ; CHECK-LABEL: vor_vx_nxv4i16_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e16, 
m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 16, i32 0 @@ -495,7 +495,7 @@ define @vor_vx_nxv8i16( %va, i16 signext %b) { ; CHECK-LABEL: vor_vx_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -507,7 +507,7 @@ define @vor_vx_nxv8i16_0( %va) { ; CHECK-LABEL: vor_vx_nxv8i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, -12 ; CHECK-NEXT: ret %head = insertelement poison, i16 -12, i32 0 @@ -519,7 +519,7 @@ define @vor_vx_nxv8i16_1( %va) { ; CHECK-LABEL: vor_vx_nxv8i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i16 15, i32 0 @@ -532,7 +532,7 @@ ; CHECK-LABEL: vor_vx_nxv8i16_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 16, i32 0 @@ -544,7 +544,7 @@ define @vor_vx_nxv16i16( %va, i16 signext %b) { ; CHECK-LABEL: vor_vx_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -556,7 +556,7 @@ define @vor_vx_nxv16i16_0( %va) { ; CHECK-LABEL: vor_vx_nxv16i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vor.vi v8, v8, -12 ; CHECK-NEXT: ret %head = insertelement poison, i16 -12, i32 0 @@ -568,7 +568,7 @@ define @vor_vx_nxv16i16_1( %va) { ; CHECK-LABEL: vor_vx_nxv16i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i16 15, i32 0 @@ -581,7 +581,7 @@ ; CHECK-LABEL: vor_vx_nxv16i16_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 16, i32 0 @@ -593,7 +593,7 @@ define @vor_vx_nxv32i16( %va, i16 signext %b) { ; CHECK-LABEL: vor_vx_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -605,7 +605,7 @@ define @vor_vx_nxv32i16_0( %va) { ; CHECK-LABEL: vor_vx_nxv32i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vor.vi v8, v8, -12 ; CHECK-NEXT: ret %head = insertelement poison, i16 -12, i32 0 @@ -617,7 +617,7 @@ define @vor_vx_nxv32i16_1( %va) { ; CHECK-LABEL: vor_vx_nxv32i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i16 15, i32 0 @@ -630,7 +630,7 @@ ; CHECK-LABEL: vor_vx_nxv32i16_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; 
CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 16, i32 0 @@ -642,7 +642,7 @@ define @vor_vx_nxv1i32( %va, i32 signext %b) { ; CHECK-LABEL: vor_vx_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -654,7 +654,7 @@ define @vor_vx_nxv1i32_0( %va) { ; CHECK-LABEL: vor_vx_nxv1i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, -12 ; CHECK-NEXT: ret %head = insertelement poison, i32 -12, i32 0 @@ -666,7 +666,7 @@ define @vor_vx_nxv1i32_1( %va) { ; CHECK-LABEL: vor_vx_nxv1i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i32 15, i32 0 @@ -679,7 +679,7 @@ ; CHECK-LABEL: vor_vx_nxv1i32_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 16, i32 0 @@ -691,7 +691,7 @@ define @vor_vx_nxv2i32( %va, i32 signext %b) { ; CHECK-LABEL: vor_vx_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -703,7 +703,7 @@ define @vor_vx_nxv2i32_0( %va) { ; CHECK-LABEL: vor_vx_nxv2i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vor.vi v8, v8, -12 ; CHECK-NEXT: ret %head = insertelement poison, i32 -12, i32 0 @@ -715,7 +715,7 @@ define @vor_vx_nxv2i32_1( %va) { ; CHECK-LABEL: vor_vx_nxv2i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i32 15, i32 0 @@ -728,7 +728,7 @@ ; CHECK-LABEL: vor_vx_nxv2i32_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 16, i32 0 @@ -740,7 +740,7 @@ define @vor_vx_nxv4i32( %va, i32 signext %b) { ; CHECK-LABEL: vor_vx_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -752,7 +752,7 @@ define @vor_vx_nxv4i32_0( %va) { ; CHECK-LABEL: vor_vx_nxv4i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, -12 ; CHECK-NEXT: ret %head = insertelement poison, i32 -12, i32 0 @@ -764,7 +764,7 @@ define @vor_vx_nxv4i32_1( %va) { ; CHECK-LABEL: vor_vx_nxv4i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i32 15, i32 0 @@ -777,7 +777,7 @@ ; CHECK-LABEL: vor_vx_nxv4i32_2: ; CHECK: # %bb.0: ; 
CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 16, i32 0 @@ -789,7 +789,7 @@ define @vor_vx_nxv8i32( %va, i32 signext %b) { ; CHECK-LABEL: vor_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -801,7 +801,7 @@ define @vor_vx_nxv8i32_0( %va) { ; CHECK-LABEL: vor_vx_nxv8i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vor.vi v8, v8, -12 ; CHECK-NEXT: ret %head = insertelement poison, i32 -12, i32 0 @@ -813,7 +813,7 @@ define @vor_vx_nxv8i32_1( %va) { ; CHECK-LABEL: vor_vx_nxv8i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i32 15, i32 0 @@ -826,7 +826,7 @@ ; CHECK-LABEL: vor_vx_nxv8i32_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 16, i32 0 @@ -838,7 +838,7 @@ define @vor_vx_nxv16i32( %va, i32 signext %b) { ; CHECK-LABEL: vor_vx_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -850,7 +850,7 @@ define @vor_vx_nxv16i32_0( %va) { ; CHECK-LABEL: vor_vx_nxv16i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vor.vi v8, v8, -12 ; CHECK-NEXT: ret %head = insertelement poison, i32 -12, i32 0 @@ -862,7 +862,7 @@ define @vor_vx_nxv16i32_1( %va) { ; CHECK-LABEL: vor_vx_nxv16i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i32 15, i32 0 @@ -875,7 +875,7 @@ ; CHECK-LABEL: vor_vx_nxv16i32_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 16, i32 0 @@ -892,7 +892,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vor.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -900,7 +900,7 @@ ; ; RV64-LABEL: vor_vx_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV64-NEXT: vor.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -912,7 +912,7 @@ define @vor_vx_nxv1i64_0( %va) { ; CHECK-LABEL: vor_vx_nxv1i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vor.vi v8, v8, -12 ; CHECK-NEXT: ret %head = insertelement poison, i64 -12, i32 0 @@ -924,7 +924,7 @@ define @vor_vx_nxv1i64_1( %va) { ; CHECK-LABEL: vor_vx_nxv1i64_1: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i64 15, i32 0 @@ -937,7 +937,7 @@ ; CHECK-LABEL: vor_vx_nxv1i64_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 16, i32 0 @@ -954,7 +954,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vor.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -962,7 +962,7 @@ ; ; RV64-LABEL: vor_vx_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV64-NEXT: vor.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -974,7 +974,7 @@ define @vor_vx_nxv2i64_0( %va) { ; CHECK-LABEL: vor_vx_nxv2i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, -12 ; CHECK-NEXT: ret %head = insertelement poison, i64 -12, i32 0 @@ -986,7 +986,7 @@ define @vor_vx_nxv2i64_1( %va) { ; CHECK-LABEL: vor_vx_nxv2i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i64 15, i32 0 @@ -999,7 +999,7 @@ ; CHECK-LABEL: vor_vx_nxv2i64_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 16, i32 0 @@ -1016,7 +1016,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vor.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -1024,7 +1024,7 @@ ; ; RV64-LABEL: vor_vx_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV64-NEXT: vor.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -1036,7 +1036,7 @@ define @vor_vx_nxv4i64_0( %va) { ; CHECK-LABEL: vor_vx_nxv4i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vor.vi v8, v8, -12 ; CHECK-NEXT: ret %head = insertelement poison, i64 -12, i32 0 @@ -1048,7 +1048,7 @@ define @vor_vx_nxv4i64_1( %va) { ; CHECK-LABEL: vor_vx_nxv4i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i64 15, i32 0 @@ -1061,7 +1061,7 @@ ; CHECK-LABEL: vor_vx_nxv4i64_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 16, i32 0 @@ -1078,7 +1078,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; 
RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vor.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -1086,7 +1086,7 @@ ; ; RV64-LABEL: vor_vx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vor.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -1098,7 +1098,7 @@ define @vor_vx_nxv8i64_0( %va) { ; CHECK-LABEL: vor_vx_nxv8i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vor.vi v8, v8, -12 ; CHECK-NEXT: ret %head = insertelement poison, i64 -12, i32 0 @@ -1110,7 +1110,7 @@ define @vor_vx_nxv8i64_1( %va) { ; CHECK-LABEL: vor_vx_nxv8i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i64 15, i32 0 @@ -1123,7 +1123,7 @@ ; CHECK-LABEL: vor_vx_nxv8i64_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 16, i32 0 @@ -1136,7 +1136,7 @@ define @vor_vx_nxv8i64_3( %va) { ; CHECK-LABEL: vor_vx_nxv8i64_3: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, -1 ; CHECK-NEXT: ret %head = insertelement poison, i64 -1, i32 0 @@ -1152,7 +1152,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v8, (a0), zero ; RV32-NEXT: sw a3, 12(sp) ; RV32-NEXT: sw a2, 8(sp) @@ -1164,7 +1164,7 @@ ; RV64-LABEL: vor_xx_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: or a0, a0, a1 -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vmv.v.x v8, a0 ; RV64-NEXT: ret %head1 = insertelement poison, i64 %a, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll @@ -33,7 +33,7 @@ define @vor_vv_nxv1i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv1i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -57,7 +57,7 @@ define @vor_vx_nxv1i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_nxv1i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -83,7 +83,7 @@ define @vor_vi_nxv1i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_nxv1i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 5, i32 0 @@ -109,7 +109,7 @@ define @vor_vv_nxv2i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; 
CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -133,7 +133,7 @@ define @vor_vx_nxv2i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -159,7 +159,7 @@ define @vor_vi_nxv2i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 5, i32 0 @@ -185,7 +185,7 @@ define @vor_vv_nxv4i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -209,7 +209,7 @@ define @vor_vx_nxv4i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_nxv4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -235,7 +235,7 @@ define @vor_vi_nxv4i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_nxv4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 5, i32 0 @@ -261,7 +261,7 @@ define @vor_vv_nxv8i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -285,7 +285,7 @@ define @vor_vx_nxv8i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_nxv8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -311,7 +311,7 @@ define @vor_vi_nxv8i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_nxv8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 5, i32 0 @@ -337,7 +337,7 @@ define @vor_vv_nxv16i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -361,7 +361,7 @@ define @vor_vx_nxv16i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_nxv16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -387,7 +387,7 @@ define @vor_vi_nxv16i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_nxv16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, 
mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 5, i32 0 @@ -413,7 +413,7 @@ define @vor_vv_nxv32i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv32i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -437,7 +437,7 @@ define @vor_vx_nxv32i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_nxv32i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -463,7 +463,7 @@ define @vor_vi_nxv32i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_nxv32i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 5, i32 0 @@ -489,7 +489,7 @@ define @vor_vv_nxv64i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv64i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -513,7 +513,7 @@ define @vor_vx_nxv64i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_nxv64i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -539,7 +539,7 @@ define @vor_vi_nxv64i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_nxv64i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 5, i32 0 @@ -565,7 +565,7 @@ define @vor_vv_nxv1i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv1i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -589,7 +589,7 @@ define @vor_vx_nxv1i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_nxv1i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -615,7 +615,7 @@ define @vor_vi_nxv1i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_nxv1i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 5, i32 0 @@ -641,7 +641,7 @@ define @vor_vv_nxv2i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -665,7 +665,7 @@ define @vor_vx_nxv2i16_unmasked( %va, i16 %b, i32 
zeroext %evl) { ; CHECK-LABEL: vor_vx_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -691,7 +691,7 @@ define @vor_vi_nxv2i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 5, i32 0 @@ -717,7 +717,7 @@ define @vor_vv_nxv4i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -741,7 +741,7 @@ define @vor_vx_nxv4i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_nxv4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -767,7 +767,7 @@ define @vor_vi_nxv4i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_nxv4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 5, i32 0 @@ -793,7 +793,7 @@ define @vor_vv_nxv8i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -817,7 +817,7 @@ define @vor_vx_nxv8i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_nxv8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -843,7 +843,7 @@ define @vor_vi_nxv8i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_nxv8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 5, i32 0 @@ -869,7 +869,7 @@ define @vor_vv_nxv16i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -893,7 +893,7 @@ define @vor_vx_nxv16i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_nxv16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -919,7 +919,7 @@ define @vor_vi_nxv16i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_nxv16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 
5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 5, i32 0 @@ -945,7 +945,7 @@ define @vor_vv_nxv32i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv32i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -969,7 +969,7 @@ define @vor_vx_nxv32i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_nxv32i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -995,7 +995,7 @@ define @vor_vi_nxv32i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_nxv32i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 5, i32 0 @@ -1021,7 +1021,7 @@ define @vor_vv_nxv1i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv1i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1045,7 +1045,7 @@ define @vor_vx_nxv1i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_nxv1i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1071,7 +1071,7 @@ define @vor_vi_nxv1i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_nxv1i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 5, i32 0 @@ -1097,7 +1097,7 @@ define @vor_vv_nxv2i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1133,7 +1133,7 @@ define @vor_vx_nxv2i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1147,7 +1147,7 @@ define @vor_vx_nxv2i32_unmasked_commute( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_nxv2i32_unmasked_commute: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1173,7 +1173,7 @@ define @vor_vi_nxv2i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 5, i32 0 @@ -1199,7 +1199,7 @@ define @vor_vv_nxv4i32_unmasked( %va, %b, i32 zeroext %evl) { ; 
CHECK-LABEL: vor_vv_nxv4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1223,7 +1223,7 @@ define @vor_vx_nxv4i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_nxv4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1249,7 +1249,7 @@ define @vor_vi_nxv4i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_nxv4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 5, i32 0 @@ -1275,7 +1275,7 @@ define @vor_vv_nxv8i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1299,7 +1299,7 @@ define @vor_vx_nxv8i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_nxv8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1325,7 +1325,7 @@ define @vor_vi_nxv8i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_nxv8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 5, i32 0 @@ -1351,7 +1351,7 @@ define @vor_vv_nxv10i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv10i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1375,7 +1375,7 @@ define @vor_vx_nxv10i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_nxv10i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1401,7 +1401,7 @@ define @vor_vi_nxv10i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_nxv10i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 5, i32 0 @@ -1427,7 +1427,7 @@ define @vor_vv_nxv16i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1451,7 +1451,7 @@ define @vor_vx_nxv16i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vx_nxv16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vor.vx 
v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1477,7 +1477,7 @@ define @vor_vi_nxv16i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_nxv16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 5, i32 0 @@ -1503,7 +1503,7 @@ define @vor_vv_nxv1i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv1i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1520,7 +1520,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vor.vv v8, v8, v9, v0.t @@ -1546,16 +1546,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vor.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vor_vx_nxv1i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vor.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1581,7 +1581,7 @@ define @vor_vi_nxv1i64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_nxv1i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 5, i32 0 @@ -1607,7 +1607,7 @@ define @vor_vv_nxv2i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1624,7 +1624,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vor.vv v8, v8, v10, v0.t @@ -1650,16 +1650,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vor.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vor_vx_nxv2i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vor.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1685,7 +1685,7 @@ define @vor_vi_nxv2i64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, 
e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 5, i32 0 @@ -1711,7 +1711,7 @@ define @vor_vv_nxv4i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1728,7 +1728,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vor.vv v8, v8, v12, v0.t @@ -1754,16 +1754,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vor.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vor_vx_nxv4i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vor.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1789,7 +1789,7 @@ define @vor_vi_nxv4i64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_nxv4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 5, i32 0 @@ -1815,7 +1815,7 @@ define @vor_vv_nxv8i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vor_vv_nxv8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vor.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1832,7 +1832,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vor.vv v8, v8, v16, v0.t @@ -1858,16 +1858,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vor.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vor_vx_nxv8i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vor.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1893,7 +1893,7 @@ define @vor_vi_nxv8i64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vor_vi_nxv8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vor.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 5, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll 
b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll @@ -49,7 +49,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; RV32-NEXT: vsext.vf2 v8, v9 ; RV32-NEXT: ret ; @@ -57,7 +57,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t -; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; RV64-NEXT: vsext.vf2 v8, v10 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv2i8.nxv2p0i8( %ptrs, %m, i32 %evl) @@ -70,7 +70,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; RV32-NEXT: vzext.vf2 v8, v9 ; RV32-NEXT: ret ; @@ -78,7 +78,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t -; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; RV64-NEXT: vzext.vf2 v8, v10 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv2i8.nxv2p0i8( %ptrs, %m, i32 %evl) @@ -91,7 +91,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV32-NEXT: vsext.vf4 v8, v9 ; RV32-NEXT: ret ; @@ -99,7 +99,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t -; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV64-NEXT: vsext.vf4 v8, v10 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv2i8.nxv2p0i8( %ptrs, %m, i32 %evl) @@ -112,7 +112,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV32-NEXT: vzext.vf4 v8, v9 ; RV32-NEXT: ret ; @@ -120,7 +120,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t -; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV64-NEXT: vzext.vf4 v8, v10 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv2i8.nxv2p0i8( %ptrs, %m, i32 %evl) @@ -133,7 +133,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV32-NEXT: vsext.vf8 v8, v10 ; RV32-NEXT: ret ; @@ -141,7 +141,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t -; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV64-NEXT: vsext.vf8 v8, v10 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv2i8.nxv2p0i8( %ptrs, %m, i32 %evl) @@ -154,7 +154,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV32-NEXT: vzext.vf8 v8, v10 ; RV32-NEXT: ret ; @@ -162,7 +162,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, 
a0, e8, mf4, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t -; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV64-NEXT: vzext.vf8 v8, v10 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv2i8.nxv2p0i8( %ptrs, %m, i32 %evl) @@ -193,14 +193,14 @@ define @vpgather_truemask_nxv4i8( %ptrs, i32 zeroext %evl) { ; RV32-LABEL: vpgather_truemask_nxv4i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; RV32-NEXT: vluxei32.v v10, (zero), v8 ; RV32-NEXT: vmv1r.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_truemask_nxv4i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; RV64-NEXT: vluxei64.v v12, (zero), v8 ; RV64-NEXT: vmv1r.v v8, v12 ; RV64-NEXT: ret @@ -233,7 +233,7 @@ define @vpgather_baseidx_nxv8i8(i8* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_nxv8i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t @@ -241,7 +241,7 @@ ; ; RV64-LABEL: vpgather_baseidx_nxv8i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t @@ -260,7 +260,7 @@ ; RV32-NEXT: li a3, 0 ; RV32-NEXT: csrr a2, vlenb ; RV32-NEXT: srli a5, a2, 2 -; RV32-NEXT: vsetvli a4, zero, e8, mf2, ta, mu +; RV32-NEXT: vsetvli a4, zero, e8, mf2, ta, ma ; RV32-NEXT: slli a2, a2, 1 ; RV32-NEXT: sub a4, a1, a2 ; RV32-NEXT: vslidedown.vx v0, v0, a5 @@ -268,7 +268,7 @@ ; RV32-NEXT: # %bb.1: ; RV32-NEXT: mv a3, a4 ; RV32-NEXT: .LBB12_2: -; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, mu +; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma ; RV32-NEXT: vsext.vf4 v24, v10 ; RV32-NEXT: vsetvli zero, a3, e8, m2, ta, mu ; RV32-NEXT: vluxei32.v v18, (a0), v24, v0.t @@ -276,7 +276,7 @@ ; RV32-NEXT: # %bb.3: ; RV32-NEXT: mv a1, a2 ; RV32-NEXT: .LBB12_4: -; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma ; RV32-NEXT: vsext.vf4 v24, v8 ; RV32-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; RV32-NEXT: vmv1r.v v0, v12 @@ -303,12 +303,12 @@ ; RV64-NEXT: mv a7, a6 ; RV64-NEXT: .LBB12_4: ; RV64-NEXT: srli a6, a3, 2 -; RV64-NEXT: vsetvli t0, zero, e8, mf2, ta, mu +; RV64-NEXT: vsetvli t0, zero, e8, mf2, ta, ma ; RV64-NEXT: vslidedown.vx v13, v12, a6 ; RV64-NEXT: srli a6, a3, 3 -; RV64-NEXT: vsetvli t0, zero, e8, mf4, ta, mu +; RV64-NEXT: vsetvli t0, zero, e8, mf4, ta, ma ; RV64-NEXT: vslidedown.vx v0, v13, a6 -; RV64-NEXT: vsetvli t0, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli t0, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v24, v11 ; RV64-NEXT: vsetvli zero, a7, e8, m1, ta, mu ; RV64-NEXT: vluxei64.v v19, (a0), v24, v0.t @@ -321,9 +321,9 @@ ; RV64-NEXT: # %bb.7: ; RV64-NEXT: mv a4, a5 ; RV64-NEXT: .LBB12_8: -; RV64-NEXT: vsetvli a5, zero, e8, mf4, ta, mu +; RV64-NEXT: vsetvli a5, zero, e8, mf4, ta, ma ; RV64-NEXT: vslidedown.vx v0, v12, a6 -; RV64-NEXT: vsetvli a5, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a5, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v24, v9 ; RV64-NEXT: vsetvli zero, a4, e8, m1, ta, mu ; RV64-NEXT: vluxei64.v v17, (a0), v24, v0.t @@ -331,7 +331,7 @@ ; RV64-NEXT: # %bb.9: ; RV64-NEXT: mv a1, a3 ; RV64-NEXT: .LBB12_10: -; RV64-NEXT: vsetvli a4, 
zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a4, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v24, v8 ; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; RV64-NEXT: vmv1r.v v0, v12 @@ -340,7 +340,7 @@ ; RV64-NEXT: # %bb.11: ; RV64-NEXT: mv a2, a3 ; RV64-NEXT: .LBB12_12: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v24, v10 ; RV64-NEXT: vsetvli zero, a2, e8, m1, ta, mu ; RV64-NEXT: vmv1r.v v0, v13 @@ -397,7 +397,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV32-NEXT: vsext.vf2 v8, v9 ; RV32-NEXT: ret ; @@ -405,7 +405,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t -; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV64-NEXT: vsext.vf2 v8, v10 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv2i16.nxv2p0i16( %ptrs, %m, i32 %evl) @@ -418,7 +418,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV32-NEXT: vzext.vf2 v8, v9 ; RV32-NEXT: ret ; @@ -426,7 +426,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t -; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV64-NEXT: vzext.vf2 v8, v10 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv2i16.nxv2p0i16( %ptrs, %m, i32 %evl) @@ -439,7 +439,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV32-NEXT: vsext.vf4 v8, v10 ; RV32-NEXT: ret ; @@ -447,7 +447,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t -; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV64-NEXT: vsext.vf4 v8, v10 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv2i16.nxv2p0i16( %ptrs, %m, i32 %evl) @@ -460,7 +460,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV32-NEXT: vzext.vf4 v8, v10 ; RV32-NEXT: ret ; @@ -468,7 +468,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t -; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV64-NEXT: vzext.vf4 v8, v10 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv2i16.nxv2p0i16( %ptrs, %m, i32 %evl) @@ -499,14 +499,14 @@ define @vpgather_truemask_nxv4i16( %ptrs, i32 zeroext %evl) { ; RV32-LABEL: vpgather_truemask_nxv4i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; RV32-NEXT: vluxei32.v v10, (zero), v8 ; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_truemask_nxv4i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; RV64-NEXT: vluxei64.v v12, (zero), v8 ; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret @@ -539,7 +539,7 @@ define 
@vpgather_baseidx_nxv8i8_nxv8i16(i16* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_nxv8i8_nxv8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu @@ -548,7 +548,7 @@ ; ; RV64-LABEL: vpgather_baseidx_nxv8i8_nxv8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu @@ -562,7 +562,7 @@ define @vpgather_baseidx_sext_nxv8i8_nxv8i16(i16* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_sext_nxv8i8_nxv8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu @@ -571,7 +571,7 @@ ; ; RV64-LABEL: vpgather_baseidx_sext_nxv8i8_nxv8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu @@ -586,7 +586,7 @@ define @vpgather_baseidx_zext_nxv8i8_nxv8i16(i16* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf4 v12, v8 ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu @@ -595,7 +595,7 @@ ; ; RV64-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf8 v16, v8 ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu @@ -610,7 +610,7 @@ define @vpgather_baseidx_nxv8i16(i16* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_nxv8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v12, v8 ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu @@ -619,7 +619,7 @@ ; ; RV64-LABEL: vpgather_baseidx_nxv8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v8 ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu @@ -673,7 +673,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV32-NEXT: vsext.vf2 v8, v10 ; RV32-NEXT: ret ; @@ -681,7 +681,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t -; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV64-NEXT: vsext.vf2 v8, v10 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv2i32.nxv2p0i32( %ptrs, %m, i32 %evl) @@ -694,7 +694,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV32-NEXT: vzext.vf2 v8, v10 ; RV32-NEXT: 
ret ; @@ -702,7 +702,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t -; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV64-NEXT: vzext.vf2 v8, v10 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv2i32.nxv2p0i32( %ptrs, %m, i32 %evl) @@ -732,13 +732,13 @@ define @vpgather_truemask_nxv4i32( %ptrs, i32 zeroext %evl) { ; RV32-LABEL: vpgather_truemask_nxv4i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; RV32-NEXT: vluxei32.v v8, (zero), v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_truemask_nxv4i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; RV64-NEXT: vluxei64.v v12, (zero), v8 ; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret @@ -770,7 +770,7 @@ define @vpgather_baseidx_nxv8i8_nxv8i32(i32* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_nxv8i8_nxv8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu @@ -779,7 +779,7 @@ ; ; RV64-LABEL: vpgather_baseidx_nxv8i8_nxv8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu @@ -793,7 +793,7 @@ define @vpgather_baseidx_sext_nxv8i8_nxv8i32(i32* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_sext_nxv8i8_nxv8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu @@ -802,7 +802,7 @@ ; ; RV64-LABEL: vpgather_baseidx_sext_nxv8i8_nxv8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu @@ -817,7 +817,7 @@ define @vpgather_baseidx_zext_nxv8i8_nxv8i32(i32* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu @@ -826,7 +826,7 @@ ; ; RV64-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu @@ -841,7 +841,7 @@ define @vpgather_baseidx_nxv8i16_nxv8i32(i32* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_nxv8i16_nxv8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu @@ -850,7 +850,7 @@ ; ; RV64-LABEL: vpgather_baseidx_nxv8i16_nxv8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli 
zero, a1, e32, m4, ta, mu @@ -864,7 +864,7 @@ define @vpgather_baseidx_sext_nxv8i16_nxv8i32(i32* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_sext_nxv8i16_nxv8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu @@ -873,7 +873,7 @@ ; ; RV64-LABEL: vpgather_baseidx_sext_nxv8i16_nxv8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu @@ -888,7 +888,7 @@ define @vpgather_baseidx_zext_nxv8i16_nxv8i32(i32* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu @@ -897,7 +897,7 @@ ; ; RV64-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu @@ -912,7 +912,7 @@ define @vpgather_baseidx_nxv8i32(i32* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_nxv8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v8, v8, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t @@ -920,7 +920,7 @@ ; ; RV64-LABEL: vpgather_baseidx_nxv8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf2 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu @@ -991,14 +991,14 @@ define @vpgather_truemask_nxv4i64( %ptrs, i32 zeroext %evl) { ; RV32-LABEL: vpgather_truemask_nxv4i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; RV32-NEXT: vluxei32.v v12, (zero), v8 ; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_truemask_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; RV64-NEXT: vluxei64.v v8, (zero), v8 ; RV64-NEXT: ret %mhead = insertelement poison, i1 1, i32 0 @@ -1029,7 +1029,7 @@ define @vpgather_baseidx_nxv8i8_nxv8i64(i64* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_nxv8i8_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v16, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -1038,7 +1038,7 @@ ; ; RV64-LABEL: vpgather_baseidx_nxv8i8_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -1052,7 +1052,7 @@ define @vpgather_baseidx_sext_nxv8i8_nxv8i64(i64* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_sext_nxv8i8_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, 
zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v16, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -1061,7 +1061,7 @@ ; ; RV64-LABEL: vpgather_baseidx_sext_nxv8i8_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -1076,7 +1076,7 @@ define @vpgather_baseidx_zext_nxv8i8_nxv8i64(i64* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v16, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -1085,7 +1085,7 @@ ; ; RV64-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -1100,7 +1100,7 @@ define @vpgather_baseidx_nxv8i16_nxv8i64(i64* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_nxv8i16_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v16, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -1109,7 +1109,7 @@ ; ; RV64-LABEL: vpgather_baseidx_nxv8i16_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -1123,7 +1123,7 @@ define @vpgather_baseidx_sext_nxv8i16_nxv8i64(i64* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_sext_nxv8i16_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v16, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -1132,7 +1132,7 @@ ; ; RV64-LABEL: vpgather_baseidx_sext_nxv8i16_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -1147,7 +1147,7 @@ define @vpgather_baseidx_zext_nxv8i16_nxv8i64(i64* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v16, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -1156,7 +1156,7 @@ ; ; RV64-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -1171,7 +1171,7 @@ define @vpgather_baseidx_nxv8i32_nxv8i64(i64* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_nxv8i32_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v16, v8, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, 
v0.t @@ -1179,7 +1179,7 @@ ; ; RV64-LABEL: vpgather_baseidx_nxv8i32_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf2 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -1193,7 +1193,7 @@ define @vpgather_baseidx_sext_nxv8i32_nxv8i64(i64* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_sext_nxv8i32_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v16, v8, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t @@ -1201,7 +1201,7 @@ ; ; RV64-LABEL: vpgather_baseidx_sext_nxv8i32_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf2 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -1216,7 +1216,7 @@ define @vpgather_baseidx_zext_nxv8i32_nxv8i64(i64* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_zext_nxv8i32_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v16, v8, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t @@ -1224,7 +1224,7 @@ ; ; RV64-LABEL: vpgather_baseidx_zext_nxv8i32_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf2 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -1239,7 +1239,7 @@ define @vpgather_baseidx_nxv8i64(i64* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vnsrl.wi v16, v8, 0 ; RV32-NEXT: vsll.vi v16, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -1248,7 +1248,7 @@ ; ; RV64-LABEL: vpgather_baseidx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsll.vi v8, v8, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t @@ -1321,14 +1321,14 @@ define @vpgather_truemask_nxv4f16( %ptrs, i32 zeroext %evl) { ; RV32-LABEL: vpgather_truemask_nxv4f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; RV32-NEXT: vluxei32.v v10, (zero), v8 ; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_truemask_nxv4f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; RV64-NEXT: vluxei64.v v12, (zero), v8 ; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret @@ -1361,7 +1361,7 @@ define @vpgather_baseidx_nxv8i8_nxv8f16(half* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_nxv8i8_nxv8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu @@ -1370,7 +1370,7 @@ ; ; RV64-LABEL: vpgather_baseidx_nxv8i8_nxv8f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vadd.vv v16, v16, 
v16 ; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu @@ -1384,7 +1384,7 @@ define @vpgather_baseidx_sext_nxv8i8_nxv8f16(half* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_sext_nxv8i8_nxv8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu @@ -1393,7 +1393,7 @@ ; ; RV64-LABEL: vpgather_baseidx_sext_nxv8i8_nxv8f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu @@ -1408,7 +1408,7 @@ define @vpgather_baseidx_zext_nxv8i8_nxv8f16(half* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf4 v12, v8 ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu @@ -1417,7 +1417,7 @@ ; ; RV64-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf8 v16, v8 ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu @@ -1432,7 +1432,7 @@ define @vpgather_baseidx_nxv8f16(half* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_nxv8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v12, v8 ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu @@ -1441,7 +1441,7 @@ ; ; RV64-LABEL: vpgather_baseidx_nxv8f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v8 ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu @@ -1512,13 +1512,13 @@ define @vpgather_truemask_nxv4f32( %ptrs, i32 zeroext %evl) { ; RV32-LABEL: vpgather_truemask_nxv4f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; RV32-NEXT: vluxei32.v v8, (zero), v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_truemask_nxv4f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; RV64-NEXT: vluxei64.v v12, (zero), v8 ; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret @@ -1550,7 +1550,7 @@ define @vpgather_baseidx_nxv8i8_nxv8f32(float* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_nxv8i8_nxv8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu @@ -1559,7 +1559,7 @@ ; ; RV64-LABEL: vpgather_baseidx_nxv8i8_nxv8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu @@ -1573,7 +1573,7 @@ define @vpgather_baseidx_sext_nxv8i8_nxv8f32(float* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_sext_nxv8i8_nxv8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, 
zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu @@ -1582,7 +1582,7 @@ ; ; RV64-LABEL: vpgather_baseidx_sext_nxv8i8_nxv8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu @@ -1597,7 +1597,7 @@ define @vpgather_baseidx_zext_nxv8i8_nxv8f32(float* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu @@ -1606,7 +1606,7 @@ ; ; RV64-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu @@ -1621,7 +1621,7 @@ define @vpgather_baseidx_nxv8i16_nxv8f32(float* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_nxv8i16_nxv8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu @@ -1630,7 +1630,7 @@ ; ; RV64-LABEL: vpgather_baseidx_nxv8i16_nxv8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu @@ -1644,7 +1644,7 @@ define @vpgather_baseidx_sext_nxv8i16_nxv8f32(float* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_sext_nxv8i16_nxv8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu @@ -1653,7 +1653,7 @@ ; ; RV64-LABEL: vpgather_baseidx_sext_nxv8i16_nxv8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu @@ -1668,7 +1668,7 @@ define @vpgather_baseidx_zext_nxv8i16_nxv8f32(float* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu @@ -1677,7 +1677,7 @@ ; ; RV64-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu @@ -1692,7 +1692,7 @@ define @vpgather_baseidx_nxv8f32(float* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_nxv8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v8, v8, 2 ; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t @@ 
-1700,7 +1700,7 @@ ; ; RV64-LABEL: vpgather_baseidx_nxv8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf2 v16, v8 ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu @@ -1771,14 +1771,14 @@ define @vpgather_truemask_nxv4f64( %ptrs, i32 zeroext %evl) { ; RV32-LABEL: vpgather_truemask_nxv4f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; RV32-NEXT: vluxei32.v v12, (zero), v8 ; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_truemask_nxv4f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; RV64-NEXT: vluxei64.v v8, (zero), v8 ; RV64-NEXT: ret %mhead = insertelement poison, i1 1, i32 0 @@ -1809,7 +1809,7 @@ define @vpgather_baseidx_nxv6i8_nxv6f64(double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_nxv6i8_nxv6f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v16, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -1818,7 +1818,7 @@ ; ; RV64-LABEL: vpgather_baseidx_nxv6i8_nxv6f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -1832,7 +1832,7 @@ define @vpgather_baseidx_sext_nxv6i8_nxv6f64(double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_sext_nxv6i8_nxv6f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v16, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -1841,7 +1841,7 @@ ; ; RV64-LABEL: vpgather_baseidx_sext_nxv6i8_nxv6f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -1856,7 +1856,7 @@ define @vpgather_baseidx_zext_nxv6i8_nxv6f64(double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_zext_nxv6i8_nxv6f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v16, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -1865,7 +1865,7 @@ ; ; RV64-LABEL: vpgather_baseidx_zext_nxv6i8_nxv6f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -1880,7 +1880,7 @@ define @vpgather_baseidx_nxv6i16_nxv6f64(double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_nxv6i16_nxv6f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v16, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -1889,7 +1889,7 @@ ; ; RV64-LABEL: vpgather_baseidx_nxv6i16_nxv6f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; 
RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -1903,7 +1903,7 @@ define @vpgather_baseidx_sext_nxv6i16_nxv6f64(double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_sext_nxv6i16_nxv6f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v16, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -1912,7 +1912,7 @@ ; ; RV64-LABEL: vpgather_baseidx_sext_nxv6i16_nxv6f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -1927,7 +1927,7 @@ define @vpgather_baseidx_zext_nxv6i16_nxv6f64(double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_zext_nxv6i16_nxv6f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v16, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -1936,7 +1936,7 @@ ; ; RV64-LABEL: vpgather_baseidx_zext_nxv6i16_nxv6f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -1951,7 +1951,7 @@ define @vpgather_baseidx_nxv6i32_nxv6f64(double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_nxv6i32_nxv6f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v16, v8, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t @@ -1959,7 +1959,7 @@ ; ; RV64-LABEL: vpgather_baseidx_nxv6i32_nxv6f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf2 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -1973,7 +1973,7 @@ define @vpgather_baseidx_sext_nxv6i32_nxv6f64(double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_sext_nxv6i32_nxv6f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v16, v8, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t @@ -1981,7 +1981,7 @@ ; ; RV64-LABEL: vpgather_baseidx_sext_nxv6i32_nxv6f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf2 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -1996,7 +1996,7 @@ define @vpgather_baseidx_zext_nxv6i32_nxv6f64(double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_zext_nxv6i32_nxv6f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v16, v8, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t @@ -2004,7 +2004,7 @@ ; ; RV64-LABEL: vpgather_baseidx_zext_nxv6i32_nxv6f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf2 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -2019,7 +2019,7 @@ define 
@vpgather_baseidx_nxv6f64(double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_nxv6f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vnsrl.wi v16, v8, 0 ; RV32-NEXT: vsll.vi v16, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -2028,7 +2028,7 @@ ; ; RV64-LABEL: vpgather_baseidx_nxv6f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsll.vi v8, v8, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t @@ -2060,7 +2060,7 @@ define @vpgather_baseidx_nxv8i8_nxv8f64(double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_nxv8i8_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v16, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -2069,7 +2069,7 @@ ; ; RV64-LABEL: vpgather_baseidx_nxv8i8_nxv8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -2083,7 +2083,7 @@ define @vpgather_baseidx_sext_nxv8i8_nxv8f64(double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_sext_nxv8i8_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v16, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -2092,7 +2092,7 @@ ; ; RV64-LABEL: vpgather_baseidx_sext_nxv8i8_nxv8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -2107,7 +2107,7 @@ define @vpgather_baseidx_zext_nxv8i8_nxv8f64(double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v16, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -2116,7 +2116,7 @@ ; ; RV64-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -2131,7 +2131,7 @@ define @vpgather_baseidx_nxv8i16_nxv8f64(double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_nxv8i16_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v16, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -2140,7 +2140,7 @@ ; ; RV64-LABEL: vpgather_baseidx_nxv8i16_nxv8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -2154,7 +2154,7 @@ define @vpgather_baseidx_sext_nxv8i16_nxv8f64(double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_sext_nxv8i16_nxv8f64: ; RV32: # %bb.0: -; 
RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v16, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -2163,7 +2163,7 @@ ; ; RV64-LABEL: vpgather_baseidx_sext_nxv8i16_nxv8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -2178,7 +2178,7 @@ define @vpgather_baseidx_zext_nxv8i16_nxv8f64(double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v16, v12, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -2187,7 +2187,7 @@ ; ; RV64-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -2202,7 +2202,7 @@ define @vpgather_baseidx_nxv8i32_nxv8f64(double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_nxv8i32_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v16, v8, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t @@ -2210,7 +2210,7 @@ ; ; RV64-LABEL: vpgather_baseidx_nxv8i32_nxv8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf2 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -2224,7 +2224,7 @@ define @vpgather_baseidx_sext_nxv8i32_nxv8f64(double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_sext_nxv8i32_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v16, v8, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t @@ -2232,7 +2232,7 @@ ; ; RV64-LABEL: vpgather_baseidx_sext_nxv8i32_nxv8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf2 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -2247,7 +2247,7 @@ define @vpgather_baseidx_zext_nxv8i32_nxv8f64(double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_zext_nxv8i32_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v16, v8, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t @@ -2255,7 +2255,7 @@ ; ; RV64-LABEL: vpgather_baseidx_zext_nxv8i32_nxv8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf2 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -2270,7 +2270,7 @@ define @vpgather_baseidx_nxv8f64(double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpgather_baseidx_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; 
RV32-NEXT: vnsrl.wi v16, v8, 0 ; RV32-NEXT: vsll.vi v16, v16, 3 ; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu @@ -2279,7 +2279,7 @@ ; ; RV64-LABEL: vpgather_baseidx_nxv8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsll.vi v8, v8, 3 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t @@ -2298,7 +2298,7 @@ ; RV32-NEXT: li a2, 0 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: srli a4, a1, 3 -; RV32-NEXT: vsetvli a3, zero, e8, mf4, ta, mu +; RV32-NEXT: vsetvli a3, zero, e8, mf4, ta, ma ; RV32-NEXT: sub a3, a0, a1 ; RV32-NEXT: vslidedown.vx v0, v0, a4 ; RV32-NEXT: bltu a0, a3, .LBB102_2 @@ -2323,7 +2323,7 @@ ; RV64-NEXT: li a2, 0 ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: srli a4, a1, 3 -; RV64-NEXT: vsetvli a3, zero, e8, mf4, ta, mu +; RV64-NEXT: vsetvli a3, zero, e8, mf4, ta, ma ; RV64-NEXT: sub a3, a0, a1 ; RV64-NEXT: vslidedown.vx v0, v0, a4 ; RV64-NEXT: bltu a0, a3, .LBB102_2 @@ -2349,12 +2349,12 @@ ; RV32: # %bb.0: ; RV32-NEXT: vmv1r.v v12, v0 ; RV32-NEXT: li a3, 0 -; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma ; RV32-NEXT: vsext.vf2 v16, v8 ; RV32-NEXT: vsll.vi v24, v16, 3 ; RV32-NEXT: csrr a2, vlenb ; RV32-NEXT: srli a5, a2, 3 -; RV32-NEXT: vsetvli a4, zero, e8, mf4, ta, mu +; RV32-NEXT: vsetvli a4, zero, e8, mf4, ta, ma ; RV32-NEXT: sub a4, a1, a2 ; RV32-NEXT: vslidedown.vx v0, v0, a5 ; RV32-NEXT: bltu a1, a4, .LBB103_2 @@ -2376,20 +2376,20 @@ ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v12, v0 ; RV64-NEXT: li a3, 0 -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v24, v8 ; RV64-NEXT: vsext.vf4 v16, v10 ; RV64-NEXT: vsll.vi v16, v16, 3 ; RV64-NEXT: csrr a2, vlenb ; RV64-NEXT: srli a5, a2, 3 -; RV64-NEXT: vsetvli a4, zero, e8, mf4, ta, mu +; RV64-NEXT: vsetvli a4, zero, e8, mf4, ta, ma ; RV64-NEXT: sub a4, a1, a2 ; RV64-NEXT: vslidedown.vx v0, v0, a5 ; RV64-NEXT: bltu a1, a4, .LBB103_2 ; RV64-NEXT: # %bb.1: ; RV64-NEXT: mv a3, a4 ; RV64-NEXT: .LBB103_2: -; RV64-NEXT: vsetvli a4, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a4, zero, e64, m8, ta, ma ; RV64-NEXT: vsll.vi v24, v24, 3 ; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t @@ -2411,12 +2411,12 @@ ; RV32: # %bb.0: ; RV32-NEXT: vmv1r.v v12, v0 ; RV32-NEXT: li a3, 0 -; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma ; RV32-NEXT: vsext.vf2 v16, v8 ; RV32-NEXT: vsll.vi v24, v16, 3 ; RV32-NEXT: csrr a2, vlenb ; RV32-NEXT: srli a5, a2, 3 -; RV32-NEXT: vsetvli a4, zero, e8, mf4, ta, mu +; RV32-NEXT: vsetvli a4, zero, e8, mf4, ta, ma ; RV32-NEXT: sub a4, a1, a2 ; RV32-NEXT: vslidedown.vx v0, v0, a5 ; RV32-NEXT: bltu a1, a4, .LBB104_2 @@ -2438,20 +2438,20 @@ ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v12, v0 ; RV64-NEXT: li a3, 0 -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v10 ; RV64-NEXT: vsext.vf4 v24, v8 ; RV64-NEXT: vsll.vi v16, v16, 3 ; RV64-NEXT: csrr a2, vlenb ; RV64-NEXT: srli a5, a2, 3 -; RV64-NEXT: vsetvli a4, zero, e8, mf4, ta, mu +; RV64-NEXT: vsetvli a4, zero, e8, mf4, ta, ma ; RV64-NEXT: sub a4, a1, a2 ; RV64-NEXT: vslidedown.vx v0, v0, a5 ; RV64-NEXT: bltu a1, a4, .LBB104_2 ; RV64-NEXT: # %bb.1: ; RV64-NEXT: mv a3, a4 ; RV64-NEXT: .LBB104_2: -; RV64-NEXT: vsetvli a4, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a4, zero, e64, m8, ta, ma ; 
RV64-NEXT: vsll.vi v24, v24, 3 ; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t @@ -2474,12 +2474,12 @@ ; RV32: # %bb.0: ; RV32-NEXT: vmv1r.v v12, v0 ; RV32-NEXT: li a3, 0 -; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma ; RV32-NEXT: vzext.vf2 v16, v8 ; RV32-NEXT: vsll.vi v24, v16, 3 ; RV32-NEXT: csrr a2, vlenb ; RV32-NEXT: srli a5, a2, 3 -; RV32-NEXT: vsetvli a4, zero, e8, mf4, ta, mu +; RV32-NEXT: vsetvli a4, zero, e8, mf4, ta, ma ; RV32-NEXT: sub a4, a1, a2 ; RV32-NEXT: vslidedown.vx v0, v0, a5 ; RV32-NEXT: bltu a1, a4, .LBB105_2 @@ -2501,20 +2501,20 @@ ; RV64: # %bb.0: ; RV64-NEXT: vmv1r.v v12, v0 ; RV64-NEXT: li a3, 0 -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf4 v16, v10 ; RV64-NEXT: vzext.vf4 v24, v8 ; RV64-NEXT: vsll.vi v16, v16, 3 ; RV64-NEXT: csrr a2, vlenb ; RV64-NEXT: srli a5, a2, 3 -; RV64-NEXT: vsetvli a4, zero, e8, mf4, ta, mu +; RV64-NEXT: vsetvli a4, zero, e8, mf4, ta, ma ; RV64-NEXT: sub a4, a1, a2 ; RV64-NEXT: vslidedown.vx v0, v0, a5 ; RV64-NEXT: bltu a1, a4, .LBB105_2 ; RV64-NEXT: # %bb.1: ; RV64-NEXT: mv a3, a4 ; RV64-NEXT: .LBB105_2: -; RV64-NEXT: vsetvli a4, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a4, zero, e64, m8, ta, ma ; RV64-NEXT: vsll.vi v24, v24, 3 ; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t diff --git a/llvm/test/CodeGen/RISCV/rvv/vpload.ll b/llvm/test/CodeGen/RISCV/rvv/vpload.ll --- a/llvm/test/CodeGen/RISCV/rvv/vpload.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vpload.ll @@ -19,7 +19,7 @@ define @vpload_nxv1i8_allones_mask(* %ptr, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv1i8_allones_mask: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: ret %a = insertelement poison, i1 true, i32 0 @@ -79,7 +79,7 @@ define @vpload_nxv8i8_allones_mask(* %ptr, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv8i8_allones_mask: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: ret %a = insertelement poison, i1 true, i32 0 @@ -115,7 +115,7 @@ define @vpload_nxv2i16_allones_mask(* %ptr, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv2i16_allones_mask: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: ret %a = insertelement poison, i1 true, i32 0 @@ -187,7 +187,7 @@ define @vpload_nxv4i32_allones_mask(* %ptr, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv4i32_allones_mask: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: ret %a = insertelement poison, i1 true, i32 0 @@ -223,7 +223,7 @@ define @vpload_nxv1i64_allones_mask(* %ptr, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv1i64_allones_mask: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: ret %a = insertelement poison, i1 true, i32 0 @@ -295,7 +295,7 @@ define @vpload_nxv2f16_allones_mask(* %ptr, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv2f16_allones_mask: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; 
CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: ret %a = insertelement poison, i1 true, i32 0 @@ -379,7 +379,7 @@ define @vpload_nxv8f32_allones_mask(* %ptr, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv8f32_allones_mask: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: ret %a = insertelement poison, i1 true, i32 0 @@ -427,7 +427,7 @@ define @vpload_nxv4f64_allones_mask(* %ptr, i32 zeroext %evl) { ; CHECK-LABEL: vpload_nxv4f64_allones_mask: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: ret %a = insertelement poison, i1 true, i32 0 @@ -457,7 +457,7 @@ ; CHECK-NEXT: li a3, 0 ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: srli a5, a2, 3 -; CHECK-NEXT: vsetvli a4, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a4, zero, e8, mf4, ta, ma ; CHECK-NEXT: sub a4, a1, a2 ; CHECK-NEXT: vslidedown.vx v0, v0, a5 ; CHECK-NEXT: bltu a1, a4, .LBB37_2 @@ -510,7 +510,7 @@ ; CHECK-NEXT: .LBB38_4: ; CHECK-NEXT: li a7, 0 ; CHECK-NEXT: srli t0, a3, 3 -; CHECK-NEXT: vsetvli t1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli t1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v8, t0 ; CHECK-NEXT: slli t0, a3, 3 ; CHECK-NEXT: add t0, a0, t0 @@ -523,7 +523,7 @@ ; CHECK-NEXT: # %bb.5: ; CHECK-NEXT: mv a7, t0 ; CHECK-NEXT: .LBB38_6: -; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v8, a6 ; CHECK-NEXT: add a2, a0, a5 ; CHECK-NEXT: bltu a7, a3, .LBB38_8 diff --git a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll @@ -9,7 +9,7 @@ define @vpmerge_nxv1i1( %va, %vb, %m, i32 zeroext %evl) { ; RV32-LABEL: vpmerge_nxv1i1: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; RV32-NEXT: vid.v v10 ; RV32-NEXT: vmsltu.vx v10, v10, a0 ; RV32-NEXT: vmand.mm v9, v9, v10 @@ -20,7 +20,7 @@ ; ; RV64-LABEL: vpmerge_nxv1i1: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV64-NEXT: vid.v v10 ; RV64-NEXT: vmsltu.vx v10, v10, a0 ; RV64-NEXT: vmand.mm v9, v9, v10 @@ -37,7 +37,7 @@ define @vpmerge_vv_nxv1i8( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vmerge.vvm v9, v9, v8, v0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -48,7 +48,7 @@ define @vpmerge_vx_nxv1i8(i8 %a, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vx_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %a, i32 0 @@ -60,7 +60,7 @@ define @vpmerge_vi_nxv1i8( %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 2, i32 0 @@ -74,7 +74,7 @@ define @vpmerge_vv_nxv2i8( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: 
vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vmerge.vvm v9, v9, v8, v0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -85,7 +85,7 @@ define @vpmerge_vx_nxv2i8(i8 %a, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vx_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %a, i32 0 @@ -97,7 +97,7 @@ define @vpmerge_vi_nxv2i8( %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 2, i32 0 @@ -111,7 +111,7 @@ define @vpmerge_vv_nxv3i8( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv3i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vmerge.vvm v9, v9, v8, v0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -122,7 +122,7 @@ define @vpmerge_vx_nxv3i8(i8 %a, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vx_nxv3i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %a, i32 0 @@ -134,7 +134,7 @@ define @vpmerge_vi_nxv3i8( %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_nxv3i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 2, i32 0 @@ -148,7 +148,7 @@ define @vpmerge_vv_nxv4i8( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vmerge.vvm v9, v9, v8, v0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -159,7 +159,7 @@ define @vpmerge_vx_nxv4i8(i8 %a, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vx_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %a, i32 0 @@ -171,7 +171,7 @@ define @vpmerge_vi_nxv4i8( %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 2, i32 0 @@ -185,7 +185,7 @@ define @vpmerge_vv_nxv8i7( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv8i7: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vmerge.vvm v9, v9, v8, v0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -196,7 +196,7 @@ define @vpmerge_vx_nxv8i7(i7 %a, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vx_nxv8i7: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i7 %a, i32 0 @@ -208,7 +208,7 @@ define @vpmerge_vi_nxv8i7( %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vpmerge_vi_nxv8i7: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i7 2, i32 0 @@ -222,7 +222,7 @@ define @vpmerge_vv_nxv8i8( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vmerge.vvm v9, v9, v8, v0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -233,7 +233,7 @@ define @vpmerge_vx_nxv8i8(i8 %a, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vx_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %a, i32 0 @@ -245,7 +245,7 @@ define @vpmerge_vi_nxv8i8( %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 2, i32 0 @@ -259,7 +259,7 @@ define @vpmerge_vv_nxv16i8( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vmerge.vvm v10, v10, v8, v0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -270,7 +270,7 @@ define @vpmerge_vx_nxv16i8(i8 %a, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vx_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %a, i32 0 @@ -282,7 +282,7 @@ define @vpmerge_vi_nxv16i8( %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 2, i32 0 @@ -296,7 +296,7 @@ define @vpmerge_vv_nxv32i8( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vmerge.vvm v12, v12, v8, v0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -307,7 +307,7 @@ define @vpmerge_vx_nxv32i8(i8 %a, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vx_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %a, i32 0 @@ -319,7 +319,7 @@ define @vpmerge_vi_nxv32i8( %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 2, i32 0 @@ -333,7 +333,7 @@ define @vpmerge_vv_nxv64i8( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma ; CHECK-NEXT: vmerge.vvm v16, v16, v8, v0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -344,7 +344,7 @@ define 
@vpmerge_vx_nxv64i8(i8 %a, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vx_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %a, i32 0 @@ -356,7 +356,7 @@ define @vpmerge_vi_nxv64i8( %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 2, i32 0 @@ -385,7 +385,7 @@ ; RV32-NEXT: add a4, sp, a4 ; RV32-NEXT: addi a4, a4, 16 ; RV32-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill -; RV32-NEXT: vsetvli a4, zero, e8, m8, ta, mu +; RV32-NEXT: vsetvli a4, zero, e8, m8, ta, ma ; RV32-NEXT: vlm.v v2, (a2) ; RV32-NEXT: sub a4, a3, a1 ; RV32-NEXT: vmv1r.v v1, v0 @@ -402,7 +402,7 @@ ; RV32-NEXT: mv a2, a4 ; RV32-NEXT: .LBB28_2: ; RV32-NEXT: vl8r.v v8, (a0) -; RV32-NEXT: vsetvli zero, a2, e8, m8, tu, mu +; RV32-NEXT: vsetvli zero, a2, e8, m8, tu, ma ; RV32-NEXT: vmv1r.v v0, v2 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 4 @@ -419,7 +419,7 @@ ; RV32-NEXT: # %bb.3: ; RV32-NEXT: mv a3, a1 ; RV32-NEXT: .LBB28_4: -; RV32-NEXT: vsetvli zero, a3, e8, m8, tu, mu +; RV32-NEXT: vsetvli zero, a3, e8, m8, tu, ma ; RV32-NEXT: vmv1r.v v0, v1 ; RV32-NEXT: addi a0, sp, 16 ; RV32-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload @@ -442,7 +442,7 @@ ; RV64-NEXT: slli a1, a1, 3 ; RV64-NEXT: add a4, a0, a1 ; RV64-NEXT: vl8r.v v24, (a4) -; RV64-NEXT: vsetvli a4, zero, e8, m8, ta, mu +; RV64-NEXT: vsetvli a4, zero, e8, m8, ta, ma ; RV64-NEXT: vlm.v v2, (a2) ; RV64-NEXT: sub a4, a3, a1 ; RV64-NEXT: vmv1r.v v1, v0 @@ -454,14 +454,14 @@ ; RV64-NEXT: mv a2, a4 ; RV64-NEXT: .LBB28_2: ; RV64-NEXT: vl8r.v v8, (a0) -; RV64-NEXT: vsetvli zero, a2, e8, m8, tu, mu +; RV64-NEXT: vsetvli zero, a2, e8, m8, tu, ma ; RV64-NEXT: vmv1r.v v0, v2 ; RV64-NEXT: vmerge.vvm v24, v24, v16, v0 ; RV64-NEXT: bltu a3, a1, .LBB28_4 ; RV64-NEXT: # %bb.3: ; RV64-NEXT: mv a3, a1 ; RV64-NEXT: .LBB28_4: -; RV64-NEXT: vsetvli zero, a3, e8, m8, tu, mu +; RV64-NEXT: vsetvli zero, a3, e8, m8, tu, ma ; RV64-NEXT: vmv1r.v v0, v1 ; RV64-NEXT: addi a0, sp, 16 ; RV64-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload @@ -487,16 +487,16 @@ ; CHECK-NEXT: mv a4, a3 ; CHECK-NEXT: .LBB29_2: ; CHECK-NEXT: li a5, 0 -; CHECK-NEXT: vsetvli a6, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a6, zero, e8, m8, ta, ma ; CHECK-NEXT: vlm.v v24, (a1) -; CHECK-NEXT: vsetvli zero, a4, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a4, e8, m8, tu, ma ; CHECK-NEXT: sub a1, a2, a3 ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: bltu a2, a1, .LBB29_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: mv a5, a1 ; CHECK-NEXT: .LBB29_4: -; CHECK-NEXT: vsetvli zero, a5, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a5, e8, m8, tu, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vmerge.vxm v16, v16, a0, v0 ; CHECK-NEXT: ret @@ -517,16 +517,16 @@ ; CHECK-NEXT: mv a3, a2 ; CHECK-NEXT: .LBB30_2: ; CHECK-NEXT: li a4, 0 -; CHECK-NEXT: vsetvli a5, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a5, zero, e8, m8, ta, ma ; CHECK-NEXT: vlm.v v24, (a0) -; CHECK-NEXT: vsetvli zero, a3, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a3, e8, m8, tu, ma ; CHECK-NEXT: sub a0, a1, a2 ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: bltu a1, a0, .LBB30_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: mv a4, a0 ; CHECK-NEXT: .LBB30_4: -; 
CHECK-NEXT: vsetvli zero, a4, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a4, e8, m8, tu, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vmerge.vim v16, v16, 2, v0 ; CHECK-NEXT: ret @@ -541,7 +541,7 @@ define @vpmerge_vv_nxv1i16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vmerge.vvm v9, v9, v8, v0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -552,7 +552,7 @@ define @vpmerge_vx_nxv1i16(i16 %a, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vx_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %a, i32 0 @@ -564,7 +564,7 @@ define @vpmerge_vi_nxv1i16( %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 2, i32 0 @@ -578,7 +578,7 @@ define @vpmerge_vv_nxv2i16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vmerge.vvm v9, v9, v8, v0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -589,7 +589,7 @@ define @vpmerge_vx_nxv2i16(i16 %a, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vx_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %a, i32 0 @@ -601,7 +601,7 @@ define @vpmerge_vi_nxv2i16( %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 2, i32 0 @@ -615,7 +615,7 @@ define @vpmerge_vv_nxv4i16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vmerge.vvm v9, v9, v8, v0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -626,7 +626,7 @@ define @vpmerge_vx_nxv4i16(i16 %a, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vx_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %a, i32 0 @@ -638,7 +638,7 @@ define @vpmerge_vi_nxv4i16( %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 2, i32 0 @@ -652,7 +652,7 @@ define @vpmerge_vv_nxv8i16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vmerge.vvm v10, v10, v8, v0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -663,7 +663,7 @@ define @vpmerge_vx_nxv8i16(i16 %a, 
%vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vx_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %a, i32 0 @@ -675,7 +675,7 @@ define @vpmerge_vi_nxv8i16( %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 2, i32 0 @@ -689,7 +689,7 @@ define @vpmerge_vv_nxv16i16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vmerge.vvm v12, v12, v8, v0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -700,7 +700,7 @@ define @vpmerge_vx_nxv16i16(i16 %a, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vx_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %a, i32 0 @@ -712,7 +712,7 @@ define @vpmerge_vi_nxv16i16( %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 2, i32 0 @@ -726,7 +726,7 @@ define @vpmerge_vv_nxv32i16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vmerge.vvm v16, v16, v8, v0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -737,7 +737,7 @@ define @vpmerge_vx_nxv32i16(i16 %a, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vx_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %a, i32 0 @@ -749,7 +749,7 @@ define @vpmerge_vi_nxv32i16( %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 2, i32 0 @@ -763,7 +763,7 @@ define @vpmerge_vv_nxv1i32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vmerge.vvm v9, v9, v8, v0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -774,7 +774,7 @@ define @vpmerge_vx_nxv1i32(i32 %a, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vx_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %a, i32 0 @@ -786,7 +786,7 @@ define @vpmerge_vi_nxv1i32( %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, 
v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 2, i32 0 @@ -800,7 +800,7 @@ define @vpmerge_vv_nxv2i32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vmerge.vvm v9, v9, v8, v0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -811,7 +811,7 @@ define @vpmerge_vx_nxv2i32(i32 %a, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vx_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %a, i32 0 @@ -823,7 +823,7 @@ define @vpmerge_vi_nxv2i32( %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 2, i32 0 @@ -837,7 +837,7 @@ define @vpmerge_vv_nxv4i32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vmerge.vvm v10, v10, v8, v0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -848,7 +848,7 @@ define @vpmerge_vx_nxv4i32(i32 %a, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vx_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %a, i32 0 @@ -860,7 +860,7 @@ define @vpmerge_vi_nxv4i32( %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 2, i32 0 @@ -874,7 +874,7 @@ define @vpmerge_vv_nxv8i32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vmerge.vvm v12, v12, v8, v0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -885,7 +885,7 @@ define @vpmerge_vx_nxv8i32(i32 %a, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %a, i32 0 @@ -897,7 +897,7 @@ define @vpmerge_vi_nxv8i32( %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 2, i32 0 @@ -911,7 +911,7 @@ define @vpmerge_vv_nxv16i32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vmerge.vvm v16, v16, v8, v0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -922,7 +922,7 @@ define @vpmerge_vx_nxv16i32(i32 %a, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vx_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, 
m8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %a, i32 0 @@ -934,7 +934,7 @@ define @vpmerge_vi_nxv16i32( %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 2, i32 0 @@ -948,7 +948,7 @@ define @vpmerge_vv_nxv1i64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vmerge.vvm v9, v9, v8, v0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -971,7 +971,7 @@ ; ; RV64-LABEL: vpmerge_vx_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma ; RV64-NEXT: vmerge.vxm v8, v8, a0, v0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %a, i32 0 @@ -983,7 +983,7 @@ define @vpmerge_vi_nxv1i64( %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 2, i32 0 @@ -997,7 +997,7 @@ define @vpmerge_vv_nxv2i64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vmerge.vvm v10, v10, v8, v0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1020,7 +1020,7 @@ ; ; RV64-LABEL: vpmerge_vx_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, ma ; RV64-NEXT: vmerge.vxm v8, v8, a0, v0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %a, i32 0 @@ -1032,7 +1032,7 @@ define @vpmerge_vi_nxv2i64( %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 2, i32 0 @@ -1046,7 +1046,7 @@ define @vpmerge_vv_nxv4i64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vmerge.vvm v12, v12, v8, v0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1069,7 +1069,7 @@ ; ; RV64-LABEL: vpmerge_vx_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, ma ; RV64-NEXT: vmerge.vxm v8, v8, a0, v0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %a, i32 0 @@ -1081,7 +1081,7 @@ define @vpmerge_vi_nxv4i64( %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 2, i32 0 @@ -1095,7 +1095,7 @@ define @vpmerge_vv_nxv8i64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vmerge.vvm 
v16, v16, v8, v0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1118,7 +1118,7 @@ ; ; RV64-LABEL: vpmerge_vx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, tu, ma ; RV64-NEXT: vmerge.vxm v8, v8, a0, v0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %a, i32 0 @@ -1130,7 +1130,7 @@ define @vpmerge_vi_nxv8i64( %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vi_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vmerge.vim v8, v8, 2, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 2, i32 0 @@ -1144,7 +1144,7 @@ define @vpmerge_vv_nxv1f16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vmerge.vvm v9, v9, v8, v0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1155,7 +1155,7 @@ define @vpmerge_vf_nxv1f16(half %a, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vf_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %a, i32 0 @@ -1169,7 +1169,7 @@ define @vpmerge_vv_nxv2f16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vmerge.vvm v9, v9, v8, v0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1180,7 +1180,7 @@ define @vpmerge_vf_nxv2f16(half %a, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vf_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %a, i32 0 @@ -1194,7 +1194,7 @@ define @vpmerge_vv_nxv4f16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vmerge.vvm v9, v9, v8, v0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1205,7 +1205,7 @@ define @vpmerge_vf_nxv4f16(half %a, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vf_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %a, i32 0 @@ -1219,7 +1219,7 @@ define @vpmerge_vv_nxv8f16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vmerge.vvm v10, v10, v8, v0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1230,7 +1230,7 @@ define @vpmerge_vf_nxv8f16(half %a, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vf_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %a, i32 0 @@ -1244,7 +1244,7 @@ define @vpmerge_vv_nxv16f16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; 
CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vmerge.vvm v12, v12, v8, v0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1255,7 +1255,7 @@ define @vpmerge_vf_nxv16f16(half %a, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vf_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %a, i32 0 @@ -1269,7 +1269,7 @@ define @vpmerge_vv_nxv32f16( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vmerge.vvm v16, v16, v8, v0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1280,7 +1280,7 @@ define @vpmerge_vf_nxv32f16(half %a, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vf_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, half %a, i32 0 @@ -1294,7 +1294,7 @@ define @vpmerge_vv_nxv1f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vmerge.vvm v9, v9, v8, v0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1305,7 +1305,7 @@ define @vpmerge_vf_nxv1f32(float %a, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vf_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %a, i32 0 @@ -1319,7 +1319,7 @@ define @vpmerge_vv_nxv2f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vmerge.vvm v9, v9, v8, v0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1330,7 +1330,7 @@ define @vpmerge_vf_nxv2f32(float %a, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vf_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %a, i32 0 @@ -1344,7 +1344,7 @@ define @vpmerge_vv_nxv4f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vmerge.vvm v10, v10, v8, v0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1355,7 +1355,7 @@ define @vpmerge_vf_nxv4f32(float %a, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vf_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %a, i32 0 @@ -1369,7 +1369,7 @@ define @vpmerge_vv_nxv8f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vmerge.vvm v12, v12, v8, v0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1380,7 +1380,7 @@ define 
@vpmerge_vf_nxv8f32(float %a, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vf_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %a, i32 0 @@ -1394,7 +1394,7 @@ define @vpmerge_vv_nxv16f32( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vmerge.vvm v16, v16, v8, v0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1405,7 +1405,7 @@ define @vpmerge_vf_nxv16f32(float %a, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vf_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, float %a, i32 0 @@ -1419,7 +1419,7 @@ define @vpmerge_vv_nxv1f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vmerge.vvm v9, v9, v8, v0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1430,7 +1430,7 @@ define @vpmerge_vf_nxv1f64(double %a, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vf_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %a, i32 0 @@ -1444,7 +1444,7 @@ define @vpmerge_vv_nxv2f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vmerge.vvm v10, v10, v8, v0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1455,7 +1455,7 @@ define @vpmerge_vf_nxv2f64(double %a, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vf_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %a, i32 0 @@ -1469,7 +1469,7 @@ define @vpmerge_vv_nxv4f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vmerge.vvm v12, v12, v8, v0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1480,7 +1480,7 @@ define @vpmerge_vf_nxv4f64(double %a, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vf_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %a, i32 0 @@ -1494,7 +1494,7 @@ define @vpmerge_vv_nxv8f64( %va, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vmerge.vvm v16, v16, v8, v0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1505,7 +1505,7 @@ define @vpmerge_vf_nxv8f64(double %a, %vb, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpmerge_vf_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: 
vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %elt.head = insertelement poison, double %a, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll @@ -9,13 +9,13 @@ define void @vpscatter_nxv1i8( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv1i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv1i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv1i8.nxv1p0i8( %val, %ptrs, %m, i32 %evl) @@ -27,13 +27,13 @@ define void @vpscatter_nxv2i8( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv2i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv2i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv2i8.nxv2p0i8( %val, %ptrs, %m, i32 %evl) @@ -43,17 +43,17 @@ define void @vpscatter_nxv2i16_truncstore_nxv2i8( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv2i16_truncstore_nxv2i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; RV32-NEXT: vnsrl.wi v8, v8, 0 -; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv2i16_truncstore_nxv2i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; RV64-NEXT: vnsrl.wi v8, v8, 0 -; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret %tval = trunc %val to @@ -64,21 +64,21 @@ define void @vpscatter_nxv2i32_truncstore_nxv2i8( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv2i32_truncstore_nxv2i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; RV32-NEXT: vnsrl.wi v8, v8, 0 -; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; RV32-NEXT: vnsrl.wi v8, v8, 0 -; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv2i32_truncstore_nxv2i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; RV64-NEXT: vnsrl.wi v8, v8, 0 -; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; RV64-NEXT: vnsrl.wi v8, v8, 0 -; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret %tval = trunc %val to @@ -89,25 +89,25 @@ define void @vpscatter_nxv2i64_truncstore_nxv2i8( %val, %ptrs, %m, i32 
zeroext %evl) { ; RV32-LABEL: vpscatter_nxv2i64_truncstore_nxv2i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; RV32-NEXT: vnsrl.wi v11, v8, 0 -; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV32-NEXT: vnsrl.wi v8, v11, 0 -; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; RV32-NEXT: vnsrl.wi v8, v8, 0 -; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv2i64_truncstore_nxv2i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; RV64-NEXT: vnsrl.wi v12, v8, 0 -; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV64-NEXT: vnsrl.wi v8, v12, 0 -; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; RV64-NEXT: vnsrl.wi v8, v8, 0 -; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret %tval = trunc %val to @@ -120,13 +120,13 @@ define void @vpscatter_nxv4i8( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv4i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv4i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv4i8.nxv4p0i8( %val, %ptrs, %m, i32 %evl) @@ -136,13 +136,13 @@ define void @vpscatter_truemask_nxv4i8( %val, %ptrs, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_truemask_nxv4i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_truemask_nxv4i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12 ; RV64-NEXT: ret %mhead = insertelement poison, i1 1, i32 0 @@ -156,13 +156,13 @@ define void @vpscatter_nxv8i8( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv8i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; RV32-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv8i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; RV64-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv8i8.nxv8p0i8( %val, %ptrs, %m, i32 %evl) @@ -172,17 +172,17 @@ define void @vpscatter_baseidx_nxv8i8( %val, i8* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_nxv8i8: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v9 -; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv8i8: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, 
mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v9 -; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds i8, i8* %base, %idxs @@ -195,13 +195,13 @@ define void @vpscatter_nxv1i16( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv1i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv1i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv1i16.nxv1p0i16( %val, %ptrs, %m, i32 %evl) @@ -213,13 +213,13 @@ define void @vpscatter_nxv2i16( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv2i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv2i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv2i16.nxv2p0i16( %val, %ptrs, %m, i32 %evl) @@ -229,17 +229,17 @@ define void @vpscatter_nxv2i32_truncstore_nxv2i16( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv2i32_truncstore_nxv2i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; RV32-NEXT: vnsrl.wi v8, v8, 0 -; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv2i32_truncstore_nxv2i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; RV64-NEXT: vnsrl.wi v8, v8, 0 -; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret %tval = trunc %val to @@ -250,21 +250,21 @@ define void @vpscatter_nxv2i64_truncstore_nxv2i16( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv2i64_truncstore_nxv2i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; RV32-NEXT: vnsrl.wi v11, v8, 0 -; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV32-NEXT: vnsrl.wi v8, v11, 0 -; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv2i64_truncstore_nxv2i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; RV64-NEXT: vnsrl.wi v12, v8, 0 -; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV64-NEXT: vnsrl.wi v8, v12, 0 -; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret %tval = trunc %val to @@ -277,13 +277,13 @@ define void @vpscatter_nxv4i16( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: 
vpscatter_nxv4i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv4i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv4i16.nxv4p0i16( %val, %ptrs, %m, i32 %evl) @@ -293,13 +293,13 @@ define void @vpscatter_truemask_nxv4i16( %val, %ptrs, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_truemask_nxv4i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_truemask_nxv4i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12 ; RV64-NEXT: ret %mhead = insertelement poison, i1 1, i32 0 @@ -313,13 +313,13 @@ define void @vpscatter_nxv8i16( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv8i16.nxv8p0i16( %val, %ptrs, %m, i32 %evl) @@ -329,19 +329,19 @@ define void @vpscatter_baseidx_nxv8i8_nxv8i16( %val, i16* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_nxv8i8_nxv8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v10 ; RV32-NEXT: vadd.vv v12, v12, v12 -; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv8i8_nxv8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v10 ; RV64-NEXT: vadd.vv v16, v16, v16 -; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds i16, i16* %base, %idxs @@ -352,19 +352,19 @@ define void @vpscatter_baseidx_sext_nxv8i8_nxv8i16( %val, i16* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_sext_nxv8i8_nxv8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v10 ; RV32-NEXT: vadd.vv v12, v12, v12 -; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_nxv8i8_nxv8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v10 ; RV64-NEXT: vadd.vv v16, v16, v16 -; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = sext %idxs to @@ -376,19 +376,19 @@ define void 
@vpscatter_baseidx_zext_nxv8i8_nxv8i16( %val, i16* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf4 v12, v10 ; RV32-NEXT: vadd.vv v12, v12, v12 -; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf8 v16, v10 ; RV64-NEXT: vadd.vv v16, v16, v16 -; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = zext %idxs to @@ -400,19 +400,19 @@ define void @vpscatter_baseidx_nxv8i16( %val, i16* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_nxv8i16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v12, v10 ; RV32-NEXT: vadd.vv v12, v12, v12 -; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv8i16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v10 ; RV64-NEXT: vadd.vv v16, v16, v16 -; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds i16, i16* %base, %idxs @@ -425,13 +425,13 @@ define void @vpscatter_nxv1i32( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv1i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv1i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv1i32.nxv1p0i32( %val, %ptrs, %m, i32 %evl) @@ -443,13 +443,13 @@ define void @vpscatter_nxv2i32( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv2i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv2i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv2i32.nxv2p0i32( %val, %ptrs, %m, i32 %evl) @@ -459,17 +459,17 @@ define void @vpscatter_nxv2i64_truncstore_nxv2i32( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv2i64_truncstore_nxv2i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; RV32-NEXT: vnsrl.wi v11, v8, 0 -; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; RV32-NEXT: vsoxei32.v v11, (zero), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv2i64_truncstore_nxv2i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, 
zero, e32, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; RV64-NEXT: vnsrl.wi v12, v8, 0 -; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; RV64-NEXT: vsoxei64.v v12, (zero), v10, v0.t ; RV64-NEXT: ret %tval = trunc %val to @@ -482,13 +482,13 @@ define void @vpscatter_nxv4i32( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv4i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv4i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv4i32.nxv4p0i32( %val, %ptrs, %m, i32 %evl) @@ -498,13 +498,13 @@ define void @vpscatter_truemask_nxv4i32( %val, %ptrs, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_truemask_nxv4i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_truemask_nxv4i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12 ; RV64-NEXT: ret %mhead = insertelement poison, i1 1, i32 0 @@ -518,13 +518,13 @@ define void @vpscatter_nxv8i32( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv8i32.nxv8p0i32( %val, %ptrs, %m, i32 %evl) @@ -534,19 +534,19 @@ define void @vpscatter_baseidx_nxv8i8_nxv8i32( %val, i32* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_nxv8i8_nxv8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv8i8_nxv8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v12 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, %idxs @@ -557,19 +557,19 @@ define void @vpscatter_baseidx_sext_nxv8i8_nxv8i32( %val, i32* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_sext_nxv8i8_nxv8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_nxv8i8_nxv8i32: ; RV64: # %bb.0: -; RV64-NEXT: 
vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v12 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = sext %idxs to @@ -581,19 +581,19 @@ define void @vpscatter_baseidx_zext_nxv8i8_nxv8i32( %val, i32* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf4 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf8 v16, v12 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = zext %idxs to @@ -605,19 +605,19 @@ define void @vpscatter_baseidx_nxv8i16_nxv8i32( %val, i32* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_nxv8i16_nxv8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv8i16_nxv8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v12 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, %idxs @@ -628,19 +628,19 @@ define void @vpscatter_baseidx_sext_nxv8i16_nxv8i32( %val, i32* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v12 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = sext %idxs to @@ -652,19 +652,19 @@ define void @vpscatter_baseidx_zext_nxv8i16_nxv8i32( %val, i32* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf2 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vsetvli zero, 
a1, e32, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf4 v16, v12 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = zext %idxs to @@ -676,18 +676,18 @@ define void @vpscatter_baseidx_nxv8i32( %val, i32* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_nxv8i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v12, v12, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv8i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf2 v16, v12 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, %idxs @@ -700,13 +700,13 @@ define void @vpscatter_nxv1i64( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv1i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv1i64.nxv1p0i64( %val, %ptrs, %m, i32 %evl) @@ -718,13 +718,13 @@ define void @vpscatter_nxv2i64( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv2i64.nxv2p0i64( %val, %ptrs, %m, i32 %evl) @@ -736,13 +736,13 @@ define void @vpscatter_nxv4i64( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv4i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv4i64.nxv4p0i64( %val, %ptrs, %m, i32 %evl) @@ -752,13 +752,13 @@ define void @vpscatter_truemask_nxv4i64( %val, %ptrs, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_truemask_nxv4i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v12 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_truemask_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; 
RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12 ; RV64-NEXT: ret %mhead = insertelement poison, i1 1, i32 0 @@ -772,13 +772,13 @@ define void @vpscatter_nxv8i64( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv8i64.nxv8p0i64( %val, %ptrs, %m, i32 %evl) @@ -788,19 +788,19 @@ define void @vpscatter_baseidx_nxv8i8_nxv8i64( %val, i64* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_nxv8i8_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v20, v16 ; RV32-NEXT: vsll.vi v16, v20, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv8i8_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, %idxs @@ -811,19 +811,19 @@ define void @vpscatter_baseidx_sext_nxv8i8_nxv8i64( %val, i64* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_sext_nxv8i8_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v20, v16 ; RV32-NEXT: vsll.vi v16, v20, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_nxv8i8_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = sext %idxs to @@ -835,19 +835,19 @@ define void @vpscatter_baseidx_zext_nxv8i8_nxv8i64( %val, i64* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf4 v20, v16 ; RV32-NEXT: vsll.vi v16, v20, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf8 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = zext %idxs to @@ -859,19 +859,19 @@ define void 
@vpscatter_baseidx_nxv8i16_nxv8i64( %val, i64* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_nxv8i16_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v20, v16 ; RV32-NEXT: vsll.vi v16, v20, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv8i16_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, %idxs @@ -882,19 +882,19 @@ define void @vpscatter_baseidx_sext_nxv8i16_nxv8i64( %val, i64* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v20, v16 ; RV32-NEXT: vsll.vi v16, v20, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = sext %idxs to @@ -906,19 +906,19 @@ define void @vpscatter_baseidx_zext_nxv8i16_nxv8i64( %val, i64* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf2 v20, v16 ; RV32-NEXT: vsll.vi v16, v20, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf4 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = zext %idxs to @@ -930,18 +930,18 @@ define void @vpscatter_baseidx_nxv8i32_nxv8i64( %val, i64* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_nxv8i32_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v16, v16, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv8i32_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf2 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, 
ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, %idxs @@ -952,18 +952,18 @@ define void @vpscatter_baseidx_sext_nxv8i32_nxv8i64( %val, i64* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_sext_nxv8i32_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v16, v16, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_nxv8i32_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf2 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = sext %idxs to @@ -975,18 +975,18 @@ define void @vpscatter_baseidx_zext_nxv8i32_nxv8i64( %val, i64* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_zext_nxv8i32_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v16, v16, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_zext_nxv8i32_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf2 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = zext %idxs to @@ -998,18 +998,18 @@ define void @vpscatter_baseidx_nxv8i64( %val, i64* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_nxv8i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vnsrl.wi v24, v16, 0 ; RV32-NEXT: vsll.vi v16, v24, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsll.vi v16, v16, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, %idxs @@ -1022,13 +1022,13 @@ define void @vpscatter_nxv1f16( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv1f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv1f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv1f16.nxv1p0f16( %val, %ptrs, %m, i32 %evl) @@ -1040,13 +1040,13 @@ define void @vpscatter_nxv2f16( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv2f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli 
zero, a0, e16, mf2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv2f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv2f16.nxv2p0f16( %val, %ptrs, %m, i32 %evl) @@ -1058,13 +1058,13 @@ define void @vpscatter_nxv4f16( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv4f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv4f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv4f16.nxv4p0f16( %val, %ptrs, %m, i32 %evl) @@ -1074,13 +1074,13 @@ define void @vpscatter_truemask_nxv4f16( %val, %ptrs, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_truemask_nxv4f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_truemask_nxv4f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12 ; RV64-NEXT: ret %mhead = insertelement poison, i1 1, i32 0 @@ -1094,13 +1094,13 @@ define void @vpscatter_nxv8f16( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv8f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv8f16.nxv8p0f16( %val, %ptrs, %m, i32 %evl) @@ -1110,19 +1110,19 @@ define void @vpscatter_baseidx_nxv8i8_nxv8f16( %val, half* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_nxv8i8_nxv8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v10 ; RV32-NEXT: vadd.vv v12, v12, v12 -; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv8i8_nxv8f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v10 ; RV64-NEXT: vadd.vv v16, v16, v16 -; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds half, half* %base, %idxs @@ -1133,19 +1133,19 @@ define void @vpscatter_baseidx_sext_nxv8i8_nxv8f16( %val, half* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_sext_nxv8i8_nxv8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v12, v10 ; RV32-NEXT: vadd.vv v12, v12, v12 -; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV32-NEXT: vsetvli zero, a1, 
e16, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_nxv8i8_nxv8f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v10 ; RV64-NEXT: vadd.vv v16, v16, v16 -; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = sext %idxs to @@ -1157,19 +1157,19 @@ define void @vpscatter_baseidx_zext_nxv8i8_nxv8f16( %val, half* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf4 v12, v10 ; RV32-NEXT: vadd.vv v12, v12, v12 -; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf8 v16, v10 ; RV64-NEXT: vadd.vv v16, v16, v16 -; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = zext %idxs to @@ -1181,19 +1181,19 @@ define void @vpscatter_baseidx_nxv8f16( %val, half* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_nxv8f16: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v12, v10 ; RV32-NEXT: vadd.vv v12, v12, v12 -; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv8f16: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v10 ; RV64-NEXT: vadd.vv v16, v16, v16 -; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds half, half* %base, %idxs @@ -1206,13 +1206,13 @@ define void @vpscatter_nxv1f32( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv1f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv1f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv1f32.nxv1p0f32( %val, %ptrs, %m, i32 %evl) @@ -1224,13 +1224,13 @@ define void @vpscatter_nxv2f32( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv2f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv2f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv2f32.nxv2p0f32( %val, %ptrs, %m, i32 %evl) @@ -1242,13 +1242,13 @@ define 
void @vpscatter_nxv4f32( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv4f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv4f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv4f32.nxv4p0f32( %val, %ptrs, %m, i32 %evl) @@ -1258,13 +1258,13 @@ define void @vpscatter_truemask_nxv4f32( %val, %ptrs, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_truemask_nxv4f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_truemask_nxv4f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12 ; RV64-NEXT: ret %mhead = insertelement poison, i1 1, i32 0 @@ -1278,13 +1278,13 @@ define void @vpscatter_nxv8f32( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv8f32.nxv8p0f32( %val, %ptrs, %m, i32 %evl) @@ -1294,19 +1294,19 @@ define void @vpscatter_baseidx_nxv8i8_nxv8f32( %val, float* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_nxv8i8_nxv8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv8i8_nxv8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v12 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, %idxs @@ -1317,19 +1317,19 @@ define void @vpscatter_baseidx_sext_nxv8i8_nxv8f32( %val, float* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_sext_nxv8i8_nxv8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_nxv8i8_nxv8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v16, v12 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; 
RV64-NEXT: ret %eidxs = sext %idxs to @@ -1341,19 +1341,19 @@ define void @vpscatter_baseidx_zext_nxv8i8_nxv8f32( %val, float* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf4 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf8 v16, v12 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = zext %idxs to @@ -1365,19 +1365,19 @@ define void @vpscatter_baseidx_nxv8i16_nxv8f32( %val, float* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_nxv8i16_nxv8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv8i16_nxv8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v12 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, %idxs @@ -1388,19 +1388,19 @@ define void @vpscatter_baseidx_sext_nxv8i16_nxv8f32( %val, float* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v12 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = sext %idxs to @@ -1412,19 +1412,19 @@ define void @vpscatter_baseidx_zext_nxv8i16_nxv8f32( %val, float* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf2 v16, v12 ; RV32-NEXT: vsll.vi v12, v16, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: 
vzext.vf4 v16, v12 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = zext %idxs to @@ -1436,18 +1436,18 @@ define void @vpscatter_baseidx_nxv8f32( %val, float* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_nxv8f32: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v12, v12, 2 -; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv8f32: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf2 v16, v12 ; RV64-NEXT: vsll.vi v16, v16, 2 -; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, %idxs @@ -1460,13 +1460,13 @@ define void @vpscatter_nxv1f64( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv1f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv1f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv1f64.nxv1p0f64( %val, %ptrs, %m, i32 %evl) @@ -1478,13 +1478,13 @@ define void @vpscatter_nxv2f64( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv2f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv2f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv2f64.nxv2p0f64( %val, %ptrs, %m, i32 %evl) @@ -1496,13 +1496,13 @@ define void @vpscatter_nxv4f64( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv4f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv4f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv4f64.nxv4p0f64( %val, %ptrs, %m, i32 %evl) @@ -1512,13 +1512,13 @@ define void @vpscatter_truemask_nxv4f64( %val, %ptrs, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_truemask_nxv4f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v12 ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_truemask_nxv4f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v12 ; RV64-NEXT: ret %mhead = insertelement poison, i1 1, i32 0 @@ -1532,13 +1532,13 @@ define void @vpscatter_nxv6f64( %val, %ptrs, %m, i32 zeroext %evl) { ; 
RV32-LABEL: vpscatter_nxv6f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv6f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv6f64.nxv6p0f64( %val, %ptrs, %m, i32 %evl) @@ -1548,19 +1548,19 @@ define void @vpscatter_baseidx_nxv6i8_nxv6f64( %val, double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_nxv6i8_nxv6f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v20, v16 ; RV32-NEXT: vsll.vi v16, v20, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv6i8_nxv6f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, %idxs @@ -1571,19 +1571,19 @@ define void @vpscatter_baseidx_sext_nxv6i8_nxv6f64( %val, double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_sext_nxv6i8_nxv6f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v20, v16 ; RV32-NEXT: vsll.vi v16, v20, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_nxv6i8_nxv6f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = sext %idxs to @@ -1595,19 +1595,19 @@ define void @vpscatter_baseidx_zext_nxv6i8_nxv6f64( %val, double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_zext_nxv6i8_nxv6f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf4 v20, v16 ; RV32-NEXT: vsll.vi v16, v20, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_zext_nxv6i8_nxv6f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf8 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = zext %idxs to @@ -1619,19 +1619,19 @@ define void @vpscatter_baseidx_nxv6i16_nxv6f64( %val, double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_nxv6i16_nxv6f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: 
vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v20, v16 ; RV32-NEXT: vsll.vi v16, v20, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv6i16_nxv6f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, %idxs @@ -1642,19 +1642,19 @@ define void @vpscatter_baseidx_sext_nxv6i16_nxv6f64( %val, double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_sext_nxv6i16_nxv6f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v20, v16 ; RV32-NEXT: vsll.vi v16, v20, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_nxv6i16_nxv6f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = sext %idxs to @@ -1666,19 +1666,19 @@ define void @vpscatter_baseidx_zext_nxv6i16_nxv6f64( %val, double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_zext_nxv6i16_nxv6f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf2 v20, v16 ; RV32-NEXT: vsll.vi v16, v20, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_zext_nxv6i16_nxv6f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf4 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = zext %idxs to @@ -1690,18 +1690,18 @@ define void @vpscatter_baseidx_nxv6i32_nxv6f64( %val, double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_nxv6i32_nxv6f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v16, v16, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv6i32_nxv6f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf2 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, %idxs @@ -1712,18 +1712,18 @@ define void 
@vpscatter_baseidx_sext_nxv6i32_nxv6f64( %val, double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_sext_nxv6i32_nxv6f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v16, v16, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_nxv6i32_nxv6f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf2 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = sext %idxs to @@ -1735,18 +1735,18 @@ define void @vpscatter_baseidx_zext_nxv6i32_nxv6f64( %val, double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_zext_nxv6i32_nxv6f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v16, v16, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_zext_nxv6i32_nxv6f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf2 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = zext %idxs to @@ -1758,18 +1758,18 @@ define void @vpscatter_baseidx_nxv6f64( %val, double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_nxv6f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vnsrl.wi v24, v16, 0 ; RV32-NEXT: vsll.vi v16, v24, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv6f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsll.vi v16, v16, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, %idxs @@ -1782,13 +1782,13 @@ define void @vpscatter_nxv8f64( %val, %ptrs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_nxv8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t ; RV64-NEXT: ret call void @llvm.vp.scatter.nxv8f64.nxv8p0f64( %val, %ptrs, %m, i32 %evl) @@ -1798,19 +1798,19 @@ define void @vpscatter_baseidx_nxv8i8_nxv8f64( %val, double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_nxv8i8_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: 
vsext.vf4 v20, v16 ; RV32-NEXT: vsll.vi v16, v20, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv8i8_nxv8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, %idxs @@ -1821,19 +1821,19 @@ define void @vpscatter_baseidx_sext_nxv8i8_nxv8f64( %val, double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_sext_nxv8i8_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf4 v20, v16 ; RV32-NEXT: vsll.vi v16, v20, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_nxv8i8_nxv8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf8 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = sext %idxs to @@ -1845,19 +1845,19 @@ define void @vpscatter_baseidx_zext_nxv8i8_nxv8f64( %val, double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf4 v20, v16 ; RV32-NEXT: vsll.vi v16, v20, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf8 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = zext %idxs to @@ -1869,19 +1869,19 @@ define void @vpscatter_baseidx_nxv8i16_nxv8f64( %val, double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_nxv8i16_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v20, v16 ; RV32-NEXT: vsll.vi v16, v20, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv8i16_nxv8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, %idxs @@ -1892,19 +1892,19 @@ define void @vpscatter_baseidx_sext_nxv8i16_nxv8f64( %val, 
double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsext.vf2 v20, v16 ; RV32-NEXT: vsll.vi v16, v20, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = sext %idxs to @@ -1916,19 +1916,19 @@ define void @vpscatter_baseidx_zext_nxv8i16_nxv8f64( %val, double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vzext.vf2 v20, v16 ; RV32-NEXT: vsll.vi v16, v20, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf4 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = zext %idxs to @@ -1940,18 +1940,18 @@ define void @vpscatter_baseidx_nxv8i32_nxv8f64( %val, double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_nxv8i32_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v16, v16, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv8i32_nxv8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf2 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, %idxs @@ -1962,18 +1962,18 @@ define void @vpscatter_baseidx_sext_nxv8i32_nxv8f64( %val, double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_sext_nxv8i32_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v16, v16, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_sext_nxv8i32_nxv8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf2 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), 
v16, v0.t ; RV64-NEXT: ret %eidxs = sext %idxs to @@ -1985,18 +1985,18 @@ define void @vpscatter_baseidx_zext_nxv8i32_nxv8f64( %val, double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_zext_nxv8i32_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vsll.vi v16, v16, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_zext_nxv8i32_nxv8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf2 v24, v16 ; RV64-NEXT: vsll.vi v16, v24, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %eidxs = zext %idxs to @@ -2008,18 +2008,18 @@ define void @vpscatter_baseidx_nxv8f64( %val, double* %base, %idxs, %m, i32 zeroext %evl) { ; RV32-LABEL: vpscatter_baseidx_nxv8f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma ; RV32-NEXT: vnsrl.wi v24, v16, 0 ; RV32-NEXT: vsll.vi v16, v24, 3 -; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; RV32-NEXT: ret ; ; RV64-LABEL: vpscatter_baseidx_nxv8f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; RV64-NEXT: vsll.vi v16, v16, 3 -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, %idxs @@ -2040,17 +2040,17 @@ ; RV32-NEXT: mv a2, a0 ; RV32-NEXT: .LBB95_2: ; RV32-NEXT: li a3, 0 -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (zero), v24, v0.t ; RV32-NEXT: srli a2, a0, 3 -; RV32-NEXT: vsetvli a4, zero, e8, mf4, ta, mu +; RV32-NEXT: vsetvli a4, zero, e8, mf4, ta, ma ; RV32-NEXT: sub a0, a1, a0 ; RV32-NEXT: vslidedown.vx v0, v0, a2 ; RV32-NEXT: bltu a1, a0, .LBB95_4 ; RV32-NEXT: # %bb.3: ; RV32-NEXT: mv a3, a0 ; RV32-NEXT: .LBB95_4: -; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v16, (zero), v28, v0.t ; RV32-NEXT: ret ; @@ -2074,17 +2074,17 @@ ; RV64-NEXT: .LBB95_2: ; RV64-NEXT: li a4, 0 ; RV64-NEXT: vl8re64.v v24, (a0) -; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t ; RV64-NEXT: srli a3, a1, 3 -; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; RV64-NEXT: sub a0, a2, a1 ; RV64-NEXT: vslidedown.vx v0, v0, a3 ; RV64-NEXT: bltu a2, a0, .LBB95_4 ; RV64-NEXT: # %bb.3: ; RV64-NEXT: mv a4, a0 ; RV64-NEXT: .LBB95_4: -; RV64-NEXT: vsetvli zero, a4, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a4, e64, m8, ta, ma ; RV64-NEXT: addi a0, sp, 16 ; RV64-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload ; RV64-NEXT: vsoxei64.v v8, (zero), v24, v0.t @@ -2101,7 +2101,7 @@ ; RV32-LABEL: vpscatter_baseidx_nxv16i16_nxv16f64: ; RV32: # %bb.0: ; RV32-NEXT: vl4re16.v v4, (a1) -; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; RV32-NEXT: vsext.vf2 v24, 
v4 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: vsll.vi v24, v24, 3 @@ -2111,17 +2111,17 @@ ; RV32-NEXT: mv a3, a1 ; RV32-NEXT: .LBB96_2: ; RV32-NEXT: li a4, 0 -; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: srli a3, a1, 3 -; RV32-NEXT: vsetvli a5, zero, e8, mf4, ta, mu +; RV32-NEXT: vsetvli a5, zero, e8, mf4, ta, ma ; RV32-NEXT: sub a1, a2, a1 ; RV32-NEXT: vslidedown.vx v0, v0, a3 ; RV32-NEXT: bltu a2, a1, .LBB96_4 ; RV32-NEXT: # %bb.3: ; RV32-NEXT: mv a4, a1 ; RV32-NEXT: .LBB96_4: -; RV32-NEXT: vsetvli zero, a4, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a4, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v16, (a0), v28, v0.t ; RV32-NEXT: ret ; @@ -2135,7 +2135,7 @@ ; RV64-NEXT: vl4re16.v v4, (a1) ; RV64-NEXT: addi a1, sp, 16 ; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v4 ; RV64-NEXT: vsll.vi v16, v16, 3 ; RV64-NEXT: csrr a1, vlenb @@ -2147,17 +2147,17 @@ ; RV64-NEXT: .LBB96_2: ; RV64-NEXT: li a4, 0 ; RV64-NEXT: vsll.vi v24, v24, 3 -; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: srli a3, a1, 3 -; RV64-NEXT: vsetvli a5, zero, e8, mf4, ta, mu +; RV64-NEXT: vsetvli a5, zero, e8, mf4, ta, ma ; RV64-NEXT: sub a1, a2, a1 ; RV64-NEXT: vslidedown.vx v0, v0, a3 ; RV64-NEXT: bltu a2, a1, .LBB96_4 ; RV64-NEXT: # %bb.3: ; RV64-NEXT: mv a4, a1 ; RV64-NEXT: .LBB96_4: -; RV64-NEXT: vsetvli zero, a4, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a4, e64, m8, ta, ma ; RV64-NEXT: addi a1, sp, 16 ; RV64-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload ; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t @@ -2175,7 +2175,7 @@ ; RV32-LABEL: vpscatter_baseidx_sext_nxv16i16_nxv16f64: ; RV32: # %bb.0: ; RV32-NEXT: vl4re16.v v4, (a1) -; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; RV32-NEXT: vsext.vf2 v24, v4 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: vsll.vi v24, v24, 3 @@ -2185,17 +2185,17 @@ ; RV32-NEXT: mv a3, a1 ; RV32-NEXT: .LBB97_2: ; RV32-NEXT: li a4, 0 -; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: srli a3, a1, 3 -; RV32-NEXT: vsetvli a5, zero, e8, mf4, ta, mu +; RV32-NEXT: vsetvli a5, zero, e8, mf4, ta, ma ; RV32-NEXT: sub a1, a2, a1 ; RV32-NEXT: vslidedown.vx v0, v0, a3 ; RV32-NEXT: bltu a2, a1, .LBB97_4 ; RV32-NEXT: # %bb.3: ; RV32-NEXT: mv a4, a1 ; RV32-NEXT: .LBB97_4: -; RV32-NEXT: vsetvli zero, a4, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a4, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v16, (a0), v28, v0.t ; RV32-NEXT: ret ; @@ -2209,7 +2209,7 @@ ; RV64-NEXT: vl4re16.v v4, (a1) ; RV64-NEXT: addi a1, sp, 16 ; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsext.vf4 v16, v4 ; RV64-NEXT: vsll.vi v16, v16, 3 ; RV64-NEXT: csrr a1, vlenb @@ -2221,17 +2221,17 @@ ; RV64-NEXT: .LBB97_2: ; RV64-NEXT: li a4, 0 ; RV64-NEXT: vsll.vi v24, v24, 3 -; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: srli a3, a1, 3 -; RV64-NEXT: vsetvli a5, zero, e8, mf4, ta, mu +; RV64-NEXT: vsetvli a5, zero, e8, mf4, ta, ma ; RV64-NEXT: sub 
a1, a2, a1 ; RV64-NEXT: vslidedown.vx v0, v0, a3 ; RV64-NEXT: bltu a2, a1, .LBB97_4 ; RV64-NEXT: # %bb.3: ; RV64-NEXT: mv a4, a1 ; RV64-NEXT: .LBB97_4: -; RV64-NEXT: vsetvli zero, a4, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a4, e64, m8, ta, ma ; RV64-NEXT: addi a1, sp, 16 ; RV64-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload ; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t @@ -2250,7 +2250,7 @@ ; RV32-LABEL: vpscatter_baseidx_zext_nxv16i16_nxv16f64: ; RV32: # %bb.0: ; RV32-NEXT: vl4re16.v v4, (a1) -; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; RV32-NEXT: vzext.vf2 v24, v4 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: vsll.vi v24, v24, 3 @@ -2260,17 +2260,17 @@ ; RV32-NEXT: mv a3, a1 ; RV32-NEXT: .LBB98_2: ; RV32-NEXT: li a4, 0 -; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t ; RV32-NEXT: srli a3, a1, 3 -; RV32-NEXT: vsetvli a5, zero, e8, mf4, ta, mu +; RV32-NEXT: vsetvli a5, zero, e8, mf4, ta, ma ; RV32-NEXT: sub a1, a2, a1 ; RV32-NEXT: vslidedown.vx v0, v0, a3 ; RV32-NEXT: bltu a2, a1, .LBB98_4 ; RV32-NEXT: # %bb.3: ; RV32-NEXT: mv a4, a1 ; RV32-NEXT: .LBB98_4: -; RV32-NEXT: vsetvli zero, a4, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a4, e64, m8, ta, ma ; RV32-NEXT: vsoxei32.v v16, (a0), v28, v0.t ; RV32-NEXT: ret ; @@ -2284,7 +2284,7 @@ ; RV64-NEXT: vl4re16.v v4, (a1) ; RV64-NEXT: addi a1, sp, 16 ; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vzext.vf4 v16, v4 ; RV64-NEXT: vsll.vi v16, v16, 3 ; RV64-NEXT: csrr a1, vlenb @@ -2296,17 +2296,17 @@ ; RV64-NEXT: .LBB98_2: ; RV64-NEXT: li a4, 0 ; RV64-NEXT: vsll.vi v24, v24, 3 -; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; RV64-NEXT: srli a3, a1, 3 -; RV64-NEXT: vsetvli a5, zero, e8, mf4, ta, mu +; RV64-NEXT: vsetvli a5, zero, e8, mf4, ta, ma ; RV64-NEXT: sub a1, a2, a1 ; RV64-NEXT: vslidedown.vx v0, v0, a3 ; RV64-NEXT: bltu a2, a1, .LBB98_4 ; RV64-NEXT: # %bb.3: ; RV64-NEXT: mv a4, a1 ; RV64-NEXT: .LBB98_4: -; RV64-NEXT: vsetvli zero, a4, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a4, e64, m8, ta, ma ; RV64-NEXT: addi a1, sp, 16 ; RV64-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload ; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t diff --git a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll --- a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll @@ -9,7 +9,7 @@ define void @vpstore_nxv1i8( %val, * %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vse8.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.nxv1i8.p0nxv1i8( %val, * %ptr, %m, i32 %evl) @@ -21,7 +21,7 @@ define void @vpstore_nxv2i8( %val, * %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vse8.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.nxv2i8.p0nxv2i8( %val, * %ptr, %m, i32 %evl) @@ -33,7 +33,7 @@ define void @vpstore_nxv3i8( %val, * %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv3i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; 
CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vse8.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.nxv3i8.p0nxv3i8( %val, * %ptr, %m, i32 %evl) @@ -45,7 +45,7 @@ define void @vpstore_nxv4i8( %val, * %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vse8.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.nxv4i8.p0nxv4i8( %val, * %ptr, %m, i32 %evl) @@ -57,7 +57,7 @@ define void @vpstore_nxv8i8( %val, * %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vse8.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.nxv8i8.p0nxv8i8( %val, * %ptr, %m, i32 %evl) @@ -69,7 +69,7 @@ define void @vpstore_nxv1i16( %val, * %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.nxv1i16.p0nxv1i16( %val, * %ptr, %m, i32 %evl) @@ -81,7 +81,7 @@ define void @vpstore_nxv2i16( %val, * %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.nxv2i16.p0nxv2i16( %val, * %ptr, %m, i32 %evl) @@ -93,7 +93,7 @@ define void @vpstore_nxv4i16( %val, * %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.nxv4i16.p0nxv4i16( %val, * %ptr, %m, i32 %evl) @@ -105,7 +105,7 @@ define void @vpstore_nxv8i16( %val, * %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.nxv8i16.p0nxv8i16( %val, * %ptr, %m, i32 %evl) @@ -117,7 +117,7 @@ define void @vpstore_nxv1i32( %val, * %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.nxv1i32.p0nxv1i32( %val, * %ptr, %m, i32 %evl) @@ -129,7 +129,7 @@ define void @vpstore_nxv2i32( %val, * %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.nxv2i32.p0nxv2i32( %val, * %ptr, %m, i32 %evl) @@ -141,7 +141,7 @@ define void @vpstore_nxv4i32( %val, * %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.nxv4i32.p0nxv4i32( %val, * %ptr, %m, i32 %evl) @@ -153,7 +153,7 @@ define void @vpstore_nxv8i32( %val, * %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv8i32: ; CHECK: # 
%bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.nxv8i32.p0nxv8i32( %val, * %ptr, %m, i32 %evl) @@ -165,7 +165,7 @@ define void @vpstore_nxv1i64( %val, * %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vse64.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.nxv1i64.p0nxv1i64( %val, * %ptr, %m, i32 %evl) @@ -177,7 +177,7 @@ define void @vpstore_nxv2i64( %val, * %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vse64.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.nxv2i64.p0nxv2i64( %val, * %ptr, %m, i32 %evl) @@ -189,7 +189,7 @@ define void @vpstore_nxv4i64( %val, * %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vse64.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.nxv4i64.p0nxv4i64( %val, * %ptr, %m, i32 %evl) @@ -201,7 +201,7 @@ define void @vpstore_nxv8i64( %val, * %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vse64.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.nxv8i64.p0nxv8i64( %val, * %ptr, %m, i32 %evl) @@ -213,7 +213,7 @@ define void @vpstore_nxv1f16( %val, * %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.nxv1f16.p0nxv1f16( %val, * %ptr, %m, i32 %evl) @@ -225,7 +225,7 @@ define void @vpstore_nxv2f16( %val, * %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.nxv2f16.p0nxv2f16( %val, * %ptr, %m, i32 %evl) @@ -237,7 +237,7 @@ define void @vpstore_nxv4f16( %val, * %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.nxv4f16.p0nxv4f16( %val, * %ptr, %m, i32 %evl) @@ -249,7 +249,7 @@ define void @vpstore_nxv8f16( %val, * %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.nxv8f16.p0nxv8f16( %val, * %ptr, %m, i32 %evl) @@ -261,7 +261,7 @@ define void @vpstore_nxv1f32( %val, * %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.nxv1f32.p0nxv1f32( %val, * %ptr, %m, i32 %evl) @@ -273,7 +273,7 @@ define void @vpstore_nxv2f32( 
%val, * %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.nxv2f32.p0nxv2f32( %val, * %ptr, %m, i32 %evl) @@ -285,7 +285,7 @@ define void @vpstore_nxv4f32( %val, * %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.nxv4f32.p0nxv4f32( %val, * %ptr, %m, i32 %evl) @@ -297,7 +297,7 @@ define void @vpstore_nxv8f32( %val, * %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.nxv8f32.p0nxv8f32( %val, * %ptr, %m, i32 %evl) @@ -309,7 +309,7 @@ define void @vpstore_nxv1f64( %val, * %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vse64.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.nxv1f64.p0nxv1f64( %val, * %ptr, %m, i32 %evl) @@ -321,7 +321,7 @@ define void @vpstore_nxv2f64( %val, * %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vse64.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.nxv2f64.p0nxv2f64( %val, * %ptr, %m, i32 %evl) @@ -333,7 +333,7 @@ define void @vpstore_nxv4f64( %val, * %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vse64.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.nxv4f64.p0nxv4f64( %val, * %ptr, %m, i32 %evl) @@ -345,7 +345,7 @@ define void @vpstore_nxv8f64( %val, * %ptr, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vse64.v v8, (a0), v0.t ; CHECK-NEXT: ret call void @llvm.vp.store.nxv8f64.p0nxv8f64( %val, * %ptr, %m, i32 %evl) @@ -355,7 +355,7 @@ define void @vpstore_nxv1i8_allones_mask( %val, * %ptr, i32 zeroext %evl) { ; CHECK-LABEL: vpstore_nxv1i8_allones_mask: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret %a = insertelement poison, i1 true, i32 0 @@ -376,10 +376,10 @@ ; CHECK-NEXT: mv a3, a2 ; CHECK-NEXT: .LBB30_2: ; CHECK-NEXT: li a4, 0 -; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; CHECK-NEXT: vse64.v v8, (a0), v0.t ; CHECK-NEXT: srli a5, a2, 3 -; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma ; CHECK-NEXT: sub a3, a1, a2 ; CHECK-NEXT: vslidedown.vx v0, v0, a5 ; CHECK-NEXT: bltu a1, a3, .LBB30_4 @@ -388,7 +388,7 @@ ; CHECK-NEXT: .LBB30_4: ; CHECK-NEXT: slli a1, a2, 3 ; CHECK-NEXT: add a0, a0, a1 -; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma ; CHECK-NEXT: vse64.v v16, (a0), v0.t ; CHECK-NEXT: ret 
call void @llvm.vp.store.nxv16f64.p0nxv16f64( %val, * %ptr, %m, i32 %evl) @@ -424,7 +424,7 @@ ; CHECK-NEXT: .LBB31_4: ; CHECK-NEXT: li a6, 0 ; CHECK-NEXT: vl8re64.v v16, (a0) -; CHECK-NEXT: vsetvli zero, a7, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a7, e64, m8, ta, ma ; CHECK-NEXT: sub a0, a5, a3 ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vse64.v v8, (a1), v0.t @@ -434,11 +434,11 @@ ; CHECK-NEXT: .LBB31_6: ; CHECK-NEXT: li a0, 0 ; CHECK-NEXT: srli a5, a3, 3 -; CHECK-NEXT: vsetvli a7, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a7, zero, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v24, a5 ; CHECK-NEXT: slli a5, a3, 3 ; CHECK-NEXT: add a5, a1, a5 -; CHECK-NEXT: vsetvli zero, a6, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a6, e64, m8, ta, ma ; CHECK-NEXT: addi a6, sp, 16 ; CHECK-NEXT: vl8re8.v v8, (a6) # Unknown-size Folded Reload ; CHECK-NEXT: vse64.v v8, (a5), v0.t @@ -449,14 +449,14 @@ ; CHECK-NEXT: # %bb.7: ; CHECK-NEXT: mv a0, a6 ; CHECK-NEXT: .LBB31_8: -; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v24, a5 ; CHECK-NEXT: add a1, a1, a4 ; CHECK-NEXT: bltu a0, a3, .LBB31_10 ; CHECK-NEXT: # %bb.9: ; CHECK-NEXT: mv a0, a3 ; CHECK-NEXT: .LBB31_10: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vse64.v v16, (a1), v0.t ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 diff --git a/llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -33,7 +33,7 @@ define @intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -56,7 +56,7 @@ define @intrinsic_vredand_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -79,7 +79,7 @@ define @intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -102,7 +102,7 @@ define @intrinsic_vredand_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -125,7 +125,7 @@ define @intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -148,7 +148,7 @@ define @intrinsic_vredand_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -171,7 +171,7 @@ define @intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -194,7 +194,7 @@ define @intrinsic_vredand_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vredand.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vredand.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -240,7 +240,7 @@ define @intrinsic_vredand_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vredand.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -263,7 +263,7 @@ define @intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vredand.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -286,7 +286,7 @@ define @intrinsic_vredand_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -309,7 +309,7 @@ define @intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -332,7 +332,7 @@ define @intrinsic_vredand_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -355,7 +355,7 @@ define @intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, 
%3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -378,7 +378,7 @@ define @intrinsic_vredand_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -401,7 +401,7 @@ define @intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -424,7 +424,7 @@ define @intrinsic_vredand_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vredand.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -447,7 +447,7 @@ define @intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vredand.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -470,7 +470,7 @@ define @intrinsic_vredand_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vredand.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -493,7 +493,7 @@ define @intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vredand.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +516,7 @@ define @intrinsic_vredand_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vredand.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -539,7 +539,7 @@ define @intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vredand.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -562,7 +562,7 @@ define @intrinsic_vredand_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ 
-585,7 +585,7 @@ define @intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -608,7 +608,7 @@ define @intrinsic_vredand_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -631,7 +631,7 @@ define @intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -654,7 +654,7 @@ define @intrinsic_vredand_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vredand.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -677,7 +677,7 @@ define @intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vredand.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -700,7 +700,7 @@ define @intrinsic_vredand_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vredand.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -723,7 +723,7 @@ define @intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vredand.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +746,7 @@ define @intrinsic_vredand_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vredand.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -769,7 +769,7 @@ define @intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vredand.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -792,7 +792,7 @@ define @intrinsic_vredand_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, 
e64, m1, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -815,7 +815,7 @@ define @intrinsic_vredand_mask_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -838,7 +838,7 @@ define @intrinsic_vredand_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vredand.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -861,7 +861,7 @@ define @intrinsic_vredand_mask_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vredand.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -884,7 +884,7 @@ define @intrinsic_vredand_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vredand.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -907,7 +907,7 @@ define @intrinsic_vredand_mask_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vredand.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -930,7 +930,7 @@ define @intrinsic_vredand_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vredand.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vredand_mask_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vredand.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -33,7 +33,7 @@ define @intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -56,7 
+56,7 @@ define @intrinsic_vredand_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -79,7 +79,7 @@ define @intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -102,7 +102,7 @@ define @intrinsic_vredand_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -125,7 +125,7 @@ define @intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -148,7 +148,7 @@ define @intrinsic_vredand_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -171,7 +171,7 @@ define @intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -194,7 +194,7 @@ define @intrinsic_vredand_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vredand.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vredand.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -240,7 +240,7 @@ define @intrinsic_vredand_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vredand.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -263,7 +263,7 @@ define @intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vredand.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: 
@@ -286,7 +286,7 @@ define @intrinsic_vredand_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -309,7 +309,7 @@ define @intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -332,7 +332,7 @@ define @intrinsic_vredand_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -355,7 +355,7 @@ define @intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -378,7 +378,7 @@ define @intrinsic_vredand_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -401,7 +401,7 @@ define @intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -424,7 +424,7 @@ define @intrinsic_vredand_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vredand.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -447,7 +447,7 @@ define @intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vredand.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -470,7 +470,7 @@ define @intrinsic_vredand_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vredand.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -493,7 +493,7 @@ define @intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, 
a0, e16, m4, tu, ma ; CHECK-NEXT: vredand.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +516,7 @@ define @intrinsic_vredand_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vredand.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -539,7 +539,7 @@ define @intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vredand.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -562,7 +562,7 @@ define @intrinsic_vredand_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -585,7 +585,7 @@ define @intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -608,7 +608,7 @@ define @intrinsic_vredand_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -631,7 +631,7 @@ define @intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -654,7 +654,7 @@ define @intrinsic_vredand_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vredand.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -677,7 +677,7 @@ define @intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vredand.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -700,7 +700,7 @@ define @intrinsic_vredand_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vredand.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -723,7 +723,7 @@ define @intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vredand.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +746,7 @@ define @intrinsic_vredand_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vredand.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -769,7 +769,7 @@ define @intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vredand.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -792,7 +792,7 @@ define @intrinsic_vredand_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -815,7 +815,7 @@ define @intrinsic_vredand_mask_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -838,7 +838,7 @@ define @intrinsic_vredand_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vredand.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -861,7 +861,7 @@ define @intrinsic_vredand_mask_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vredand.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -884,7 +884,7 @@ define @intrinsic_vredand_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vredand.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -907,7 +907,7 @@ define @intrinsic_vredand_mask_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vredand.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -930,7 +930,7 @@ define @intrinsic_vredand_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vredand.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vredand_mask_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; 
CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vredand.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -33,7 +33,7 @@ define @intrinsic_vredmax_mask_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -56,7 +56,7 @@ define @intrinsic_vredmax_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -79,7 +79,7 @@ define @intrinsic_vredmax_mask_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -102,7 +102,7 @@ define @intrinsic_vredmax_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -125,7 +125,7 @@ define @intrinsic_vredmax_mask_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -148,7 +148,7 @@ define @intrinsic_vredmax_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -171,7 +171,7 @@ define @intrinsic_vredmax_mask_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -194,7 +194,7 @@ define @intrinsic_vredmax_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; 
CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vredmax.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vredmax_mask_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vredmax.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -240,7 +240,7 @@ define @intrinsic_vredmax_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vredmax.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -263,7 +263,7 @@ define @intrinsic_vredmax_mask_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vredmax.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -286,7 +286,7 @@ define @intrinsic_vredmax_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -309,7 +309,7 @@ define @intrinsic_vredmax_mask_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -332,7 +332,7 @@ define @intrinsic_vredmax_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -355,7 +355,7 @@ define @intrinsic_vredmax_mask_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -378,7 +378,7 @@ define @intrinsic_vredmax_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -401,7 +401,7 @@ define @intrinsic_vredmax_mask_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -424,7 +424,7 @@ define @intrinsic_vredmax_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vredmax.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -447,7 +447,7 @@ define @intrinsic_vredmax_mask_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vredmax.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -470,7 +470,7 @@ define @intrinsic_vredmax_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vredmax.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -493,7 +493,7 @@ define @intrinsic_vredmax_mask_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vredmax.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +516,7 @@ define @intrinsic_vredmax_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vredmax.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -539,7 +539,7 @@ define @intrinsic_vredmax_mask_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vredmax.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -562,7 +562,7 @@ define @intrinsic_vredmax_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -585,7 +585,7 @@ define @intrinsic_vredmax_mask_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -608,7 +608,7 @@ define @intrinsic_vredmax_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -631,7 +631,7 @@ define @intrinsic_vredmax_mask_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -654,7 +654,7 @@ define @intrinsic_vredmax_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, i32 
%3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vredmax.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -677,7 +677,7 @@ define @intrinsic_vredmax_mask_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vredmax.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -700,7 +700,7 @@ define @intrinsic_vredmax_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vredmax.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -723,7 +723,7 @@ define @intrinsic_vredmax_mask_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vredmax.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +746,7 @@ define @intrinsic_vredmax_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vredmax.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -769,7 +769,7 @@ define @intrinsic_vredmax_mask_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vredmax.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -792,7 +792,7 @@ define @intrinsic_vredmax_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -815,7 +815,7 @@ define @intrinsic_vredmax_mask_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -838,7 +838,7 @@ define @intrinsic_vredmax_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vredmax.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -861,7 +861,7 @@ define @intrinsic_vredmax_mask_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vredmax.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -884,7 
+884,7 @@ define @intrinsic_vredmax_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
; CHECK-NEXT: vredmax.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
@@ -907,7 +907,7 @@ define @intrinsic_vredmax_mask_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
; CHECK-NEXT: vredmax.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -930,7 +930,7 @@ define @intrinsic_vredmax_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
; CHECK-NEXT: vredmax.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
@@ -953,7 +953,7 @@ define @intrinsic_vredmax_mask_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
; CHECK-NEXT: vredmax.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll
@@ -10,7 +10,7 @@ define @intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -33,7 +33,7 @@ define @intrinsic_vredmax_mask_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -56,7 +56,7 @@ define @intrinsic_vredmax_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
; CHECK-NEXT: vredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -79,7 +79,7 @@ define @intrinsic_vredmax_mask_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -102,7 +102,7 @@ define @intrinsic_vredmax_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
; CHECK-NEXT: vredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
@@ -125,7 +125,7 @@ define @intrinsic_vredmax_mask_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind {
; CHECK-LABEL:
intrinsic_vredmax_mask_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -148,7 +148,7 @@ define @intrinsic_vredmax_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -171,7 +171,7 @@ define @intrinsic_vredmax_mask_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -194,7 +194,7 @@ define @intrinsic_vredmax_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vredmax.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vredmax_mask_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vredmax.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -240,7 +240,7 @@ define @intrinsic_vredmax_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vredmax.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -263,7 +263,7 @@ define @intrinsic_vredmax_mask_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vredmax.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -286,7 +286,7 @@ define @intrinsic_vredmax_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -309,7 +309,7 @@ define @intrinsic_vredmax_mask_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -332,7 +332,7 @@ define @intrinsic_vredmax_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -355,7 +355,7 @@ define @intrinsic_vredmax_mask_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, 
%3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -378,7 +378,7 @@ define @intrinsic_vredmax_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -401,7 +401,7 @@ define @intrinsic_vredmax_mask_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -424,7 +424,7 @@ define @intrinsic_vredmax_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vredmax.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -447,7 +447,7 @@ define @intrinsic_vredmax_mask_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vredmax.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -470,7 +470,7 @@ define @intrinsic_vredmax_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vredmax.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -493,7 +493,7 @@ define @intrinsic_vredmax_mask_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vredmax.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +516,7 @@ define @intrinsic_vredmax_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vredmax.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -539,7 +539,7 @@ define @intrinsic_vredmax_mask_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vredmax.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -562,7 +562,7 @@ define @intrinsic_vredmax_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ 
-585,7 +585,7 @@ define @intrinsic_vredmax_mask_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -608,7 +608,7 @@ define @intrinsic_vredmax_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -631,7 +631,7 @@ define @intrinsic_vredmax_mask_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -654,7 +654,7 @@ define @intrinsic_vredmax_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vredmax.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -677,7 +677,7 @@ define @intrinsic_vredmax_mask_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vredmax.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -700,7 +700,7 @@ define @intrinsic_vredmax_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vredmax.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -723,7 +723,7 @@ define @intrinsic_vredmax_mask_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vredmax.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +746,7 @@ define @intrinsic_vredmax_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vredmax.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -769,7 +769,7 @@ define @intrinsic_vredmax_mask_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vredmax.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -792,7 +792,7 @@ define @intrinsic_vredmax_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, 
e64, m1, tu, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -815,7 +815,7 @@ define @intrinsic_vredmax_mask_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -838,7 +838,7 @@ define @intrinsic_vredmax_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vredmax.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -861,7 +861,7 @@ define @intrinsic_vredmax_mask_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vredmax.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -884,7 +884,7 @@ define @intrinsic_vredmax_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vredmax.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -907,7 +907,7 @@ define @intrinsic_vredmax_mask_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vredmax.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -930,7 +930,7 @@ define @intrinsic_vredmax_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vredmax.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vredmax_mask_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vredmax.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -33,7 +33,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ 
-56,7 +56,7 @@ define @intrinsic_vredmaxu_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -79,7 +79,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -102,7 +102,7 @@ define @intrinsic_vredmaxu_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -125,7 +125,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -148,7 +148,7 @@ define @intrinsic_vredmaxu_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -171,7 +171,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -194,7 +194,7 @@ define @intrinsic_vredmaxu_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -240,7 +240,7 @@ define @intrinsic_vredmaxu_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -263,7 +263,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v12, 
v9, v0.t ; CHECK-NEXT: ret entry: @@ -286,7 +286,7 @@ define @intrinsic_vredmaxu_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -309,7 +309,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -332,7 +332,7 @@ define @intrinsic_vredmaxu_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -355,7 +355,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -378,7 +378,7 @@ define @intrinsic_vredmaxu_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -401,7 +401,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -424,7 +424,7 @@ define @intrinsic_vredmaxu_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -447,7 +447,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -470,7 +470,7 @@ define @intrinsic_vredmaxu_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -493,7 +493,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +516,7 @@ define @intrinsic_vredmaxu_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -539,7 +539,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -562,7 +562,7 @@ define @intrinsic_vredmaxu_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -585,7 +585,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -608,7 +608,7 @@ define @intrinsic_vredmaxu_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -631,7 +631,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -654,7 +654,7 @@ define @intrinsic_vredmaxu_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -677,7 +677,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -700,7 +700,7 @@ define @intrinsic_vredmaxu_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -723,7 +723,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, %3, i32 %4) 
nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +746,7 @@ define @intrinsic_vredmaxu_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -769,7 +769,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -792,7 +792,7 @@ define @intrinsic_vredmaxu_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -815,7 +815,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -838,7 +838,7 @@ define @intrinsic_vredmaxu_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -861,7 +861,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -884,7 +884,7 @@ define @intrinsic_vredmaxu_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -907,7 +907,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -930,7 +930,7 @@ define @intrinsic_vredmaxu_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v16, v9 ; CHECK-NEXT: ret 
entry: @@ -953,7 +953,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -33,7 +33,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -56,7 +56,7 @@ define @intrinsic_vredmaxu_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -79,7 +79,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -102,7 +102,7 @@ define @intrinsic_vredmaxu_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -125,7 +125,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -148,7 +148,7 @@ define @intrinsic_vredmaxu_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -171,7 +171,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -194,7 +194,7 @@ define @intrinsic_vredmaxu_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, i64 %3) 
nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -240,7 +240,7 @@ define @intrinsic_vredmaxu_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -263,7 +263,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -286,7 +286,7 @@ define @intrinsic_vredmaxu_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -309,7 +309,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -332,7 +332,7 @@ define @intrinsic_vredmaxu_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -355,7 +355,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -378,7 +378,7 @@ define @intrinsic_vredmaxu_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -401,7 +401,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ 
-424,7 +424,7 @@ define @intrinsic_vredmaxu_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -447,7 +447,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -470,7 +470,7 @@ define @intrinsic_vredmaxu_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -493,7 +493,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +516,7 @@ define @intrinsic_vredmaxu_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -539,7 +539,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -562,7 +562,7 @@ define @intrinsic_vredmaxu_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -585,7 +585,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -608,7 +608,7 @@ define @intrinsic_vredmaxu_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -631,7 +631,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; 
CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -654,7 +654,7 @@ define @intrinsic_vredmaxu_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -677,7 +677,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -700,7 +700,7 @@ define @intrinsic_vredmaxu_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -723,7 +723,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +746,7 @@ define @intrinsic_vredmaxu_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -769,7 +769,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -792,7 +792,7 @@ define @intrinsic_vredmaxu_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -815,7 +815,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -838,7 +838,7 @@ define @intrinsic_vredmaxu_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -861,7 +861,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vredmaxu_mask_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -884,7 +884,7 @@ define @intrinsic_vredmaxu_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -907,7 +907,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -930,7 +930,7 @@ define @intrinsic_vredmaxu_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vredmaxu.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -33,7 +33,7 @@ define @intrinsic_vredmin_mask_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -56,7 +56,7 @@ define @intrinsic_vredmin_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -79,7 +79,7 @@ define @intrinsic_vredmin_mask_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -102,7 +102,7 @@ define @intrinsic_vredmin_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, 
e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -125,7 +125,7 @@ define @intrinsic_vredmin_mask_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -148,7 +148,7 @@ define @intrinsic_vredmin_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -171,7 +171,7 @@ define @intrinsic_vredmin_mask_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -194,7 +194,7 @@ define @intrinsic_vredmin_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vredmin.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vredmin_mask_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vredmin.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -240,7 +240,7 @@ define @intrinsic_vredmin_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vredmin.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -263,7 +263,7 @@ define @intrinsic_vredmin_mask_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vredmin.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -286,7 +286,7 @@ define @intrinsic_vredmin_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -309,7 +309,7 @@ define @intrinsic_vredmin_mask_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -332,7 +332,7 @@ define @intrinsic_vredmin_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -355,7 +355,7 @@ define @intrinsic_vredmin_mask_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -378,7 +378,7 @@ define @intrinsic_vredmin_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -401,7 +401,7 @@ define @intrinsic_vredmin_mask_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -424,7 +424,7 @@ define @intrinsic_vredmin_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vredmin.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -447,7 +447,7 @@ define @intrinsic_vredmin_mask_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vredmin.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -470,7 +470,7 @@ define @intrinsic_vredmin_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vredmin.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -493,7 +493,7 @@ define @intrinsic_vredmin_mask_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vredmin.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +516,7 @@ define @intrinsic_vredmin_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vredmin.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -539,7 +539,7 @@ define @intrinsic_vredmin_mask_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vredmin.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -562,7 +562,7 @@ define @intrinsic_vredmin_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { 
; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -585,7 +585,7 @@ define @intrinsic_vredmin_mask_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -608,7 +608,7 @@ define @intrinsic_vredmin_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -631,7 +631,7 @@ define @intrinsic_vredmin_mask_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -654,7 +654,7 @@ define @intrinsic_vredmin_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vredmin.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -677,7 +677,7 @@ define @intrinsic_vredmin_mask_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vredmin.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -700,7 +700,7 @@ define @intrinsic_vredmin_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vredmin.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -723,7 +723,7 @@ define @intrinsic_vredmin_mask_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vredmin.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +746,7 @@ define @intrinsic_vredmin_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vredmin.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -769,7 +769,7 @@ define @intrinsic_vredmin_mask_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vredmin.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -792,7 +792,7 @@ 
define @intrinsic_vredmin_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -815,7 +815,7 @@ define @intrinsic_vredmin_mask_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -838,7 +838,7 @@ define @intrinsic_vredmin_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vredmin.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -861,7 +861,7 @@ define @intrinsic_vredmin_mask_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vredmin.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -884,7 +884,7 @@ define @intrinsic_vredmin_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vredmin.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -907,7 +907,7 @@ define @intrinsic_vredmin_mask_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vredmin.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -930,7 +930,7 @@ define @intrinsic_vredmin_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vredmin.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vredmin_mask_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vredmin.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -33,7 +33,7 @@ define @intrinsic_vredmin_mask_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; 
CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -56,7 +56,7 @@ define @intrinsic_vredmin_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -79,7 +79,7 @@ define @intrinsic_vredmin_mask_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -102,7 +102,7 @@ define @intrinsic_vredmin_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -125,7 +125,7 @@ define @intrinsic_vredmin_mask_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -148,7 +148,7 @@ define @intrinsic_vredmin_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -171,7 +171,7 @@ define @intrinsic_vredmin_mask_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -194,7 +194,7 @@ define @intrinsic_vredmin_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vredmin.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vredmin_mask_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vredmin.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -240,7 +240,7 @@ define @intrinsic_vredmin_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vredmin.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -263,7 +263,7 @@ define @intrinsic_vredmin_mask_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, %3, i64 %4) 
nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vredmin.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -286,7 +286,7 @@ define @intrinsic_vredmin_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -309,7 +309,7 @@ define @intrinsic_vredmin_mask_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -332,7 +332,7 @@ define @intrinsic_vredmin_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -355,7 +355,7 @@ define @intrinsic_vredmin_mask_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -378,7 +378,7 @@ define @intrinsic_vredmin_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -401,7 +401,7 @@ define @intrinsic_vredmin_mask_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -424,7 +424,7 @@ define @intrinsic_vredmin_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vredmin.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -447,7 +447,7 @@ define @intrinsic_vredmin_mask_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vredmin.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -470,7 +470,7 @@ define @intrinsic_vredmin_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vredmin.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -493,7 +493,7 @@ 
define @intrinsic_vredmin_mask_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vredmin.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +516,7 @@ define @intrinsic_vredmin_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vredmin.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -539,7 +539,7 @@ define @intrinsic_vredmin_mask_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vredmin.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -562,7 +562,7 @@ define @intrinsic_vredmin_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -585,7 +585,7 @@ define @intrinsic_vredmin_mask_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -608,7 +608,7 @@ define @intrinsic_vredmin_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -631,7 +631,7 @@ define @intrinsic_vredmin_mask_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -654,7 +654,7 @@ define @intrinsic_vredmin_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vredmin.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -677,7 +677,7 @@ define @intrinsic_vredmin_mask_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vredmin.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -700,7 +700,7 @@ define @intrinsic_vredmin_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, 
ma ; CHECK-NEXT: vredmin.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -723,7 +723,7 @@ define @intrinsic_vredmin_mask_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vredmin.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +746,7 @@ define @intrinsic_vredmin_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vredmin.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -769,7 +769,7 @@ define @intrinsic_vredmin_mask_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vredmin.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -792,7 +792,7 @@ define @intrinsic_vredmin_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -815,7 +815,7 @@ define @intrinsic_vredmin_mask_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -838,7 +838,7 @@ define @intrinsic_vredmin_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vredmin.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -861,7 +861,7 @@ define @intrinsic_vredmin_mask_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vredmin.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -884,7 +884,7 @@ define @intrinsic_vredmin_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vredmin.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -907,7 +907,7 @@ define @intrinsic_vredmin_mask_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vredmin.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -930,7 +930,7 @@ define @intrinsic_vredmin_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vredmin.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vredmin_mask_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vredmin.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -33,7 +33,7 @@ define @intrinsic_vredminu_mask_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -56,7 +56,7 @@ define @intrinsic_vredminu_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -79,7 +79,7 @@ define @intrinsic_vredminu_mask_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -102,7 +102,7 @@ define @intrinsic_vredminu_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -125,7 +125,7 @@ define @intrinsic_vredminu_mask_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -148,7 +148,7 @@ define @intrinsic_vredminu_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -171,7 +171,7 @@ define @intrinsic_vredminu_mask_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vredminu.vs v8, 
v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -194,7 +194,7 @@ define @intrinsic_vredminu_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vredminu.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vredminu_mask_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vredminu.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -240,7 +240,7 @@ define @intrinsic_vredminu_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vredminu.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -263,7 +263,7 @@ define @intrinsic_vredminu_mask_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vredminu.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -286,7 +286,7 @@ define @intrinsic_vredminu_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -309,7 +309,7 @@ define @intrinsic_vredminu_mask_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -332,7 +332,7 @@ define @intrinsic_vredminu_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -355,7 +355,7 @@ define @intrinsic_vredminu_mask_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -378,7 +378,7 @@ define @intrinsic_vredminu_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -401,7 +401,7 @@ define @intrinsic_vredminu_mask_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, 
m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -424,7 +424,7 @@ define @intrinsic_vredminu_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vredminu.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -447,7 +447,7 @@ define @intrinsic_vredminu_mask_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vredminu.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -470,7 +470,7 @@ define @intrinsic_vredminu_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vredminu.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -493,7 +493,7 @@ define @intrinsic_vredminu_mask_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vredminu.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +516,7 @@ define @intrinsic_vredminu_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vredminu.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -539,7 +539,7 @@ define @intrinsic_vredminu_mask_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vredminu.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -562,7 +562,7 @@ define @intrinsic_vredminu_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -585,7 +585,7 @@ define @intrinsic_vredminu_mask_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -608,7 +608,7 @@ define @intrinsic_vredminu_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -631,7 +631,7 @@ define @intrinsic_vredminu_mask_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vredminu_mask_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -654,7 +654,7 @@ define @intrinsic_vredminu_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vredminu.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -677,7 +677,7 @@ define @intrinsic_vredminu_mask_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vredminu.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -700,7 +700,7 @@ define @intrinsic_vredminu_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vredminu.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -723,7 +723,7 @@ define @intrinsic_vredminu_mask_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vredminu.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +746,7 @@ define @intrinsic_vredminu_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vredminu.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -769,7 +769,7 @@ define @intrinsic_vredminu_mask_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vredminu.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -792,7 +792,7 @@ define @intrinsic_vredminu_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -815,7 +815,7 @@ define @intrinsic_vredminu_mask_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -838,7 +838,7 @@ define @intrinsic_vredminu_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vredminu.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -861,7 +861,7 @@ 
define @intrinsic_vredminu_mask_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vredminu.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -884,7 +884,7 @@ define @intrinsic_vredminu_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vredminu.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -907,7 +907,7 @@ define @intrinsic_vredminu_mask_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vredminu.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -930,7 +930,7 @@ define @intrinsic_vredminu_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vredminu.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vredminu_mask_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vredminu.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -33,7 +33,7 @@ define @intrinsic_vredminu_mask_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -56,7 +56,7 @@ define @intrinsic_vredminu_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -79,7 +79,7 @@ define @intrinsic_vredminu_mask_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -102,7 +102,7 @@ define @intrinsic_vredminu_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, i64 %3) 
nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -125,7 +125,7 @@ define @intrinsic_vredminu_mask_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -148,7 +148,7 @@ define @intrinsic_vredminu_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -171,7 +171,7 @@ define @intrinsic_vredminu_mask_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -194,7 +194,7 @@ define @intrinsic_vredminu_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vredminu.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vredminu_mask_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vredminu.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -240,7 +240,7 @@ define @intrinsic_vredminu_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vredminu.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -263,7 +263,7 @@ define @intrinsic_vredminu_mask_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vredminu.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -286,7 +286,7 @@ define @intrinsic_vredminu_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -309,7 +309,7 @@ define @intrinsic_vredminu_mask_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -332,7 +332,7 @@ define 
@intrinsic_vredminu_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -355,7 +355,7 @@ define @intrinsic_vredminu_mask_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -378,7 +378,7 @@ define @intrinsic_vredminu_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -401,7 +401,7 @@ define @intrinsic_vredminu_mask_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -424,7 +424,7 @@ define @intrinsic_vredminu_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vredminu.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -447,7 +447,7 @@ define @intrinsic_vredminu_mask_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vredminu.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -470,7 +470,7 @@ define @intrinsic_vredminu_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vredminu.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -493,7 +493,7 @@ define @intrinsic_vredminu_mask_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vredminu.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +516,7 @@ define @intrinsic_vredminu_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vredminu.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -539,7 +539,7 @@ define @intrinsic_vredminu_mask_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli 
zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vredminu.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -562,7 +562,7 @@ define @intrinsic_vredminu_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -585,7 +585,7 @@ define @intrinsic_vredminu_mask_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -608,7 +608,7 @@ define @intrinsic_vredminu_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -631,7 +631,7 @@ define @intrinsic_vredminu_mask_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -654,7 +654,7 @@ define @intrinsic_vredminu_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vredminu.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -677,7 +677,7 @@ define @intrinsic_vredminu_mask_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vredminu.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -700,7 +700,7 @@ define @intrinsic_vredminu_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vredminu.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -723,7 +723,7 @@ define @intrinsic_vredminu_mask_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vredminu.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +746,7 @@ define @intrinsic_vredminu_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vredminu.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -769,7 +769,7 @@ define @intrinsic_vredminu_mask_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vredminu_mask_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vredminu.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -792,7 +792,7 @@ define @intrinsic_vredminu_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -815,7 +815,7 @@ define @intrinsic_vredminu_mask_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -838,7 +838,7 @@ define @intrinsic_vredminu_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vredminu.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -861,7 +861,7 @@ define @intrinsic_vredminu_mask_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vredminu.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -884,7 +884,7 @@ define @intrinsic_vredminu_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vredminu.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -907,7 +907,7 @@ define @intrinsic_vredminu_mask_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vredminu.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -930,7 +930,7 @@ define @intrinsic_vredminu_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vredminu.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vredminu_mask_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vredminu.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -33,7 +33,7 @@ define @intrinsic_vredor_mask_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -56,7 +56,7 @@ define @intrinsic_vredor_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -79,7 +79,7 @@ define @intrinsic_vredor_mask_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -102,7 +102,7 @@ define @intrinsic_vredor_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -125,7 +125,7 @@ define @intrinsic_vredor_mask_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -148,7 +148,7 @@ define @intrinsic_vredor_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -171,7 +171,7 @@ define @intrinsic_vredor_mask_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -194,7 +194,7 @@ define @intrinsic_vredor_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vredor.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vredor_mask_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vredor.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -240,7 +240,7 @@ define @intrinsic_vredor_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vredor.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -263,7 +263,7 @@ define @intrinsic_vredor_mask_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vredor.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -286,7 +286,7 @@ define @intrinsic_vredor_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -309,7 +309,7 @@ define @intrinsic_vredor_mask_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -332,7 +332,7 @@ define @intrinsic_vredor_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -355,7 +355,7 @@ define @intrinsic_vredor_mask_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -378,7 +378,7 @@ define @intrinsic_vredor_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -401,7 +401,7 @@ define @intrinsic_vredor_mask_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -424,7 +424,7 @@ define @intrinsic_vredor_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vredor.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -447,7 +447,7 @@ define @intrinsic_vredor_mask_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vredor.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -470,7 +470,7 @@ define @intrinsic_vredor_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vredor_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vredor.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -493,7 +493,7 @@ define @intrinsic_vredor_mask_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vredor.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +516,7 @@ define @intrinsic_vredor_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vredor.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -539,7 +539,7 @@ define @intrinsic_vredor_mask_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vredor.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -562,7 +562,7 @@ define @intrinsic_vredor_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -585,7 +585,7 @@ define @intrinsic_vredor_mask_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -608,7 +608,7 @@ define @intrinsic_vredor_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -631,7 +631,7 @@ define @intrinsic_vredor_mask_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -654,7 +654,7 @@ define @intrinsic_vredor_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vredor.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -677,7 +677,7 @@ define @intrinsic_vredor_mask_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vredor.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -700,7 +700,7 @@ define 
@intrinsic_vredor_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vredor.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -723,7 +723,7 @@ define @intrinsic_vredor_mask_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vredor.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +746,7 @@ define @intrinsic_vredor_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vredor.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -769,7 +769,7 @@ define @intrinsic_vredor_mask_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vredor.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -792,7 +792,7 @@ define @intrinsic_vredor_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -815,7 +815,7 @@ define @intrinsic_vredor_mask_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -838,7 +838,7 @@ define @intrinsic_vredor_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vredor.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -861,7 +861,7 @@ define @intrinsic_vredor_mask_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vredor.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -884,7 +884,7 @@ define @intrinsic_vredor_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vredor.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -907,7 +907,7 @@ define @intrinsic_vredor_mask_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vredor.vs v8, v12, v9, v0.t ; 
CHECK-NEXT: ret entry: @@ -930,7 +930,7 @@ define @intrinsic_vredor_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vredor.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vredor_mask_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vredor.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -33,7 +33,7 @@ define @intrinsic_vredor_mask_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -56,7 +56,7 @@ define @intrinsic_vredor_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -79,7 +79,7 @@ define @intrinsic_vredor_mask_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -102,7 +102,7 @@ define @intrinsic_vredor_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -125,7 +125,7 @@ define @intrinsic_vredor_mask_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -148,7 +148,7 @@ define @intrinsic_vredor_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -171,7 +171,7 @@ define @intrinsic_vredor_mask_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vredor_mask_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -194,7 +194,7 @@ define @intrinsic_vredor_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vredor.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vredor_mask_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vredor.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -240,7 +240,7 @@ define @intrinsic_vredor_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vredor.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -263,7 +263,7 @@ define @intrinsic_vredor_mask_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vredor.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -286,7 +286,7 @@ define @intrinsic_vredor_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -309,7 +309,7 @@ define @intrinsic_vredor_mask_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -332,7 +332,7 @@ define @intrinsic_vredor_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -355,7 +355,7 @@ define @intrinsic_vredor_mask_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -378,7 +378,7 @@ define @intrinsic_vredor_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -401,7 +401,7 @@ define @intrinsic_vredor_mask_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) 
nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -424,7 +424,7 @@ define @intrinsic_vredor_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vredor.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -447,7 +447,7 @@ define @intrinsic_vredor_mask_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vredor.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -470,7 +470,7 @@ define @intrinsic_vredor_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vredor.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -493,7 +493,7 @@ define @intrinsic_vredor_mask_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vredor.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +516,7 @@ define @intrinsic_vredor_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vredor.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -539,7 +539,7 @@ define @intrinsic_vredor_mask_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vredor.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -562,7 +562,7 @@ define @intrinsic_vredor_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -585,7 +585,7 @@ define @intrinsic_vredor_mask_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -608,7 +608,7 @@ define @intrinsic_vredor_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -631,7 +631,7 @@ define 
@intrinsic_vredor_mask_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -654,7 +654,7 @@ define @intrinsic_vredor_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vredor.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -677,7 +677,7 @@ define @intrinsic_vredor_mask_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vredor.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -700,7 +700,7 @@ define @intrinsic_vredor_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vredor.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -723,7 +723,7 @@ define @intrinsic_vredor_mask_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vredor.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +746,7 @@ define @intrinsic_vredor_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vredor.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -769,7 +769,7 @@ define @intrinsic_vredor_mask_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vredor.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -792,7 +792,7 @@ define @intrinsic_vredor_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -815,7 +815,7 @@ define @intrinsic_vredor_mask_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -838,7 +838,7 @@ define @intrinsic_vredor_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vredor.vs v8, v10, v9 ; 
CHECK-NEXT: ret entry: @@ -861,7 +861,7 @@ define @intrinsic_vredor_mask_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vredor.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -884,7 +884,7 @@ define @intrinsic_vredor_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vredor.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -907,7 +907,7 @@ define @intrinsic_vredor_mask_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vredor.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -930,7 +930,7 @@ define @intrinsic_vredor_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vredor.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vredor_mask_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vredor.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -33,7 +33,7 @@ define @intrinsic_vredsum_mask_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -56,7 +56,7 @@ define @intrinsic_vredsum_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -79,7 +79,7 @@ define @intrinsic_vredsum_mask_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -102,7 +102,7 @@ define @intrinsic_vredsum_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, i32 %3) 
nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -125,7 +125,7 @@ define @intrinsic_vredsum_mask_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -148,7 +148,7 @@ define @intrinsic_vredsum_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -171,7 +171,7 @@ define @intrinsic_vredsum_mask_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -194,7 +194,7 @@ define @intrinsic_vredsum_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vredsum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vredsum_mask_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vredsum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -240,7 +240,7 @@ define @intrinsic_vredsum_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vredsum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -263,7 +263,7 @@ define @intrinsic_vredsum_mask_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vredsum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -286,7 +286,7 @@ define @intrinsic_vredsum_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -309,7 +309,7 @@ define @intrinsic_vredsum_mask_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -332,7 +332,7 @@ define 
@intrinsic_vredsum_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -355,7 +355,7 @@ define @intrinsic_vredsum_mask_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -378,7 +378,7 @@ define @intrinsic_vredsum_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -401,7 +401,7 @@ define @intrinsic_vredsum_mask_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -424,7 +424,7 @@ define @intrinsic_vredsum_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vredsum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -447,7 +447,7 @@ define @intrinsic_vredsum_mask_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vredsum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -470,7 +470,7 @@ define @intrinsic_vredsum_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vredsum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -493,7 +493,7 @@ define @intrinsic_vredsum_mask_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vredsum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +516,7 @@ define @intrinsic_vredsum_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vredsum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -539,7 +539,7 @@ define @intrinsic_vredsum_mask_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; 
CHECK-NEXT: vredsum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -562,7 +562,7 @@ define @intrinsic_vredsum_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -585,7 +585,7 @@ define @intrinsic_vredsum_mask_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -608,7 +608,7 @@ define @intrinsic_vredsum_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -631,7 +631,7 @@ define @intrinsic_vredsum_mask_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -654,7 +654,7 @@ define @intrinsic_vredsum_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vredsum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -677,7 +677,7 @@ define @intrinsic_vredsum_mask_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vredsum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -700,7 +700,7 @@ define @intrinsic_vredsum_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vredsum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -723,7 +723,7 @@ define @intrinsic_vredsum_mask_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vredsum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +746,7 @@ define @intrinsic_vredsum_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vredsum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -769,7 +769,7 @@ define @intrinsic_vredsum_mask_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vredsum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -792,7 +792,7 @@ define @intrinsic_vredsum_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -815,7 +815,7 @@ define @intrinsic_vredsum_mask_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -838,7 +838,7 @@ define @intrinsic_vredsum_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vredsum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -861,7 +861,7 @@ define @intrinsic_vredsum_mask_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vredsum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -884,7 +884,7 @@ define @intrinsic_vredsum_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vredsum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -907,7 +907,7 @@ define @intrinsic_vredsum_mask_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vredsum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -930,7 +930,7 @@ define @intrinsic_vredsum_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vredsum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vredsum_mask_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vredsum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: 
vredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -33,7 +33,7 @@ define @intrinsic_vredsum_mask_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -56,7 +56,7 @@ define @intrinsic_vredsum_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -79,7 +79,7 @@ define @intrinsic_vredsum_mask_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -102,7 +102,7 @@ define @intrinsic_vredsum_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -125,7 +125,7 @@ define @intrinsic_vredsum_mask_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -148,7 +148,7 @@ define @intrinsic_vredsum_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -171,7 +171,7 @@ define @intrinsic_vredsum_mask_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -194,7 +194,7 @@ define @intrinsic_vredsum_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vredsum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vredsum_mask_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vredsum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -240,7 +240,7 @@ define @intrinsic_vredsum_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; 
CHECK-NEXT: vredsum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -263,7 +263,7 @@ define @intrinsic_vredsum_mask_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vredsum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -286,7 +286,7 @@ define @intrinsic_vredsum_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -309,7 +309,7 @@ define @intrinsic_vredsum_mask_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -332,7 +332,7 @@ define @intrinsic_vredsum_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -355,7 +355,7 @@ define @intrinsic_vredsum_mask_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -378,7 +378,7 @@ define @intrinsic_vredsum_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -401,7 +401,7 @@ define @intrinsic_vredsum_mask_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -424,7 +424,7 @@ define @intrinsic_vredsum_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vredsum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -447,7 +447,7 @@ define @intrinsic_vredsum_mask_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vredsum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -470,7 +470,7 @@ define @intrinsic_vredsum_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vredsum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -493,7 +493,7 @@ define @intrinsic_vredsum_mask_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vredsum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +516,7 @@ define @intrinsic_vredsum_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vredsum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -539,7 +539,7 @@ define @intrinsic_vredsum_mask_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vredsum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -562,7 +562,7 @@ define @intrinsic_vredsum_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -585,7 +585,7 @@ define @intrinsic_vredsum_mask_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -608,7 +608,7 @@ define @intrinsic_vredsum_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -631,7 +631,7 @@ define @intrinsic_vredsum_mask_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -654,7 +654,7 @@ define @intrinsic_vredsum_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vredsum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -677,7 +677,7 @@ define @intrinsic_vredsum_mask_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vredsum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -700,7 +700,7 @@ define @intrinsic_vredsum_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vredsum_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vredsum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -723,7 +723,7 @@ define @intrinsic_vredsum_mask_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vredsum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +746,7 @@ define @intrinsic_vredsum_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vredsum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -769,7 +769,7 @@ define @intrinsic_vredsum_mask_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vredsum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -792,7 +792,7 @@ define @intrinsic_vredsum_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -815,7 +815,7 @@ define @intrinsic_vredsum_mask_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -838,7 +838,7 @@ define @intrinsic_vredsum_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vredsum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -861,7 +861,7 @@ define @intrinsic_vredsum_mask_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vredsum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -884,7 +884,7 @@ define @intrinsic_vredsum_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vredsum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -907,7 +907,7 @@ define @intrinsic_vredsum_mask_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vredsum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -930,7 +930,7 @@ define 
@intrinsic_vredsum_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vredsum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vredsum_mask_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vredsum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll @@ -9,9 +9,9 @@ define half @vreduce_fadd_nxv1f16( %v, half %s) { ; CHECK-LABEL: vreduce_fadd_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -22,9 +22,9 @@ define half @vreduce_ord_fadd_nxv1f16( %v, half %s) { ; CHECK-LABEL: vreduce_ord_fadd_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -37,9 +37,9 @@ define half @vreduce_fadd_nxv2f16( %v, half %s) { ; CHECK-LABEL: vreduce_fadd_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -50,9 +50,9 @@ define half @vreduce_ord_fadd_nxv2f16( %v, half %s) { ; CHECK-LABEL: vreduce_ord_fadd_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -65,9 +65,9 @@ define half @vreduce_fadd_nxv4f16( %v, half %s) { ; CHECK-LABEL: vreduce_fadd_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -78,9 +78,9 @@ define half @vreduce_ord_fadd_nxv4f16( %v, half %s) { ; CHECK-LABEL: vreduce_ord_fadd_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v8, v9 ; CHECK-NEXT: 
vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -93,9 +93,9 @@ define float @vreduce_fadd_nxv1f32( %v, float %s) { ; CHECK-LABEL: vreduce_fadd_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -106,9 +106,9 @@ define float @vreduce_ord_fadd_nxv1f32( %v, float %s) { ; CHECK-LABEL: vreduce_ord_fadd_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -119,11 +119,11 @@ define float @vreduce_fwadd_nxv1f32( %v, float %s) { ; CHECK-LABEL: vreduce_fwadd_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfwredusum.vs v8, v8, v9 -; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %e = fpext %v to @@ -134,11 +134,11 @@ define float @vreduce_ord_fwadd_nxv1f32( %v, float %s) { ; CHECK-LABEL: vreduce_ord_fwadd_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v8, v9 -; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %e = fpext %v to @@ -151,9 +151,9 @@ define float @vreduce_fadd_nxv2f32( %v, float %s) { ; CHECK-LABEL: vreduce_fadd_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -164,9 +164,9 @@ define float @vreduce_ord_fadd_nxv2f32( %v, float %s) { ; CHECK-LABEL: vreduce_ord_fadd_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -177,11 +177,11 @@ define float @vreduce_fwadd_nxv2f32( %v, float %s) { ; CHECK-LABEL: vreduce_fwadd_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfwredusum.vs v8, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %e = fpext %v to @@ -192,11 +192,11 @@ define float 
@vreduce_ord_fwadd_nxv2f32( %v, float %s) { ; CHECK-LABEL: vreduce_ord_fwadd_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %e = fpext %v to @@ -209,9 +209,9 @@ define float @vreduce_fadd_nxv4f32( %v, float %s) { ; CHECK-LABEL: vreduce_fadd_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v8, v10 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -222,9 +222,9 @@ define float @vreduce_ord_fadd_nxv4f32( %v, float %s) { ; CHECK-LABEL: vreduce_ord_fadd_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v8, v10 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -235,11 +235,11 @@ define float @vreduce_fwadd_nxv4f32( %v, float %s) { ; CHECK-LABEL: vreduce_fwadd_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfwredusum.vs v8, v8, v9 -; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %e = fpext %v to @@ -250,11 +250,11 @@ define float @vreduce_ord_fwadd_nxv4f32( %v, float %s) { ; CHECK-LABEL: vreduce_ord_fwadd_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v8, v9 -; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %e = fpext %v to @@ -267,9 +267,9 @@ define double @vreduce_fadd_nxv1f64( %v, double %s) { ; CHECK-LABEL: vreduce_fadd_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -280,9 +280,9 @@ define double @vreduce_ord_fadd_nxv1f64( %v, double %s) { ; CHECK-LABEL: vreduce_ord_fadd_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -293,11 +293,11 @@ define double @vreduce_fwadd_nxv1f64( %v, double %s) { ; CHECK-LABEL: 
vreduce_fwadd_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwredusum.vs v8, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %e = fpext %v to @@ -308,11 +308,11 @@ define double @vreduce_ord_fwadd_nxv1f64( %v, double %s) { ; CHECK-LABEL: vreduce_ord_fwadd_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %e = fpext %v to @@ -325,9 +325,9 @@ define double @vreduce_fadd_nxv2f64( %v, double %s) { ; CHECK-LABEL: vreduce_fadd_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v8, v10 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -338,9 +338,9 @@ define double @vreduce_ord_fadd_nxv2f64( %v, double %s) { ; CHECK-LABEL: vreduce_ord_fadd_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v8, v10 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -351,11 +351,11 @@ define double @vreduce_fwadd_nxv2f64( %v, double %s) { ; CHECK-LABEL: vreduce_fwadd_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwredusum.vs v8, v8, v9 -; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %e = fpext %v to @@ -366,11 +366,11 @@ define double @vreduce_ord_fwadd_nxv2f64( %v, double %s) { ; CHECK-LABEL: vreduce_ord_fwadd_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v8, v9 -; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %e = fpext %v to @@ -383,9 +383,9 @@ define double @vreduce_fadd_nxv4f64( %v, double %s) { ; CHECK-LABEL: vreduce_fadd_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v12, fa0 -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v8, v12 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -396,9 +396,9 @@ 
define double @vreduce_ord_fadd_nxv4f64( %v, double %s) { ; CHECK-LABEL: vreduce_ord_fadd_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v12, fa0 -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v8, v12 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -409,11 +409,11 @@ define double @vreduce_fwadd_nxv4f64( %v, double %s) { ; CHECK-LABEL: vreduce_fwadd_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwredusum.vs v8, v8, v10 -; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %e = fpext %v to @@ -424,11 +424,11 @@ define double @vreduce_ord_fwadd_nxv4f64( %v, double %s) { ; CHECK-LABEL: vreduce_ord_fwadd_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfwredosum.vs v8, v8, v10 -; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret %e = fpext %v to @@ -443,9 +443,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI30_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI30_0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vlse16.v v9, (a0), zero -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -458,9 +458,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI31_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI31_0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vlse16.v v9, (a0), zero -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -473,9 +473,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI32_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI32_0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vlse16.v v9, (a0), zero -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -490,9 +490,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI33_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI33_0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vlse16.v v9, (a0), zero -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -507,9 +507,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI34_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI34_0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; 
CHECK-NEXT: vlse16.v v9, (a0), zero -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -522,13 +522,13 @@ define half @vreduce_fmin_nxv64f16( %v) { ; CHECK-LABEL: vreduce_fmin_nxv64f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v16 ; CHECK-NEXT: lui a0, %hi(.LCPI35_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI35_0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vlse16.v v16, (a0), zero -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -543,9 +543,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI36_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI36_0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vlse32.v v9, (a0), zero -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -558,9 +558,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI37_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI37_0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vlse32.v v9, (a0), zero -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -573,9 +573,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI38_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI38_0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vlse32.v v9, (a0), zero -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -590,9 +590,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI39_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI39_0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vlse32.v v9, (a0), zero -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -607,9 +607,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI40_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI40_0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vlse32.v v10, (a0), zero -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v8, v10 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -622,13 +622,13 @@ define float @vreduce_fmin_nxv32f32( %v) { ; CHECK-LABEL: vreduce_fmin_nxv32f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v16 ; CHECK-NEXT: lui a0, %hi(.LCPI41_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI41_0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vlse32.v v16, (a0), zero -; CHECK-NEXT: 
vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -643,9 +643,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI42_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI42_0) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -658,9 +658,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI43_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI43_0) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -673,9 +673,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI44_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI44_0) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -690,9 +690,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI45_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI45_0) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v8, v10 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -707,9 +707,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI46_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI46_0) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v8, v12 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -722,13 +722,13 @@ define double @vreduce_fmin_nxv16f64( %v) { ; CHECK-LABEL: vreduce_fmin_nxv16f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfmin.vv v8, v8, v16 ; CHECK-NEXT: lui a0, %hi(.LCPI47_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI47_0) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -743,9 +743,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI48_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI48_0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vlse16.v v9, (a0), zero -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -758,9 +758,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI49_0) ; CHECK-NEXT: addi a0, 
a0, %lo(.LCPI49_0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vlse16.v v9, (a0), zero -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -773,9 +773,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI50_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI50_0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vlse16.v v9, (a0), zero -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -790,9 +790,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI51_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI51_0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vlse16.v v9, (a0), zero -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -807,9 +807,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI52_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI52_0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vlse16.v v9, (a0), zero -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -822,13 +822,13 @@ define half @vreduce_fmax_nxv64f16( %v) { ; CHECK-LABEL: vreduce_fmax_nxv64f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v16 ; CHECK-NEXT: lui a0, %hi(.LCPI53_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI53_0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vlse16.v v16, (a0), zero -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfredmax.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -843,9 +843,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI54_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI54_0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vlse32.v v9, (a0), zero -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -858,9 +858,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI55_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI55_0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vlse32.v v9, (a0), zero -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -873,9 +873,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI56_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI56_0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vlse32.v v9, (a0), zero -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: 
vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -890,9 +890,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI57_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI57_0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vlse32.v v9, (a0), zero -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -907,9 +907,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI58_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI58_0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vlse32.v v10, (a0), zero -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfredmax.vs v8, v8, v10 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -922,13 +922,13 @@ define float @vreduce_fmax_nxv32f32( %v) { ; CHECK-LABEL: vreduce_fmax_nxv32f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v16 ; CHECK-NEXT: lui a0, %hi(.LCPI59_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI59_0) -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vlse32.v v16, (a0), zero -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfredmax.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -943,9 +943,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI60_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI60_0) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -958,9 +958,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI61_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI61_0) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -973,9 +973,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI62_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI62_0) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfredmax.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -990,9 +990,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI63_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI63_0) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfredmax.vs v8, v8, v10 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1007,9 +1007,9 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI64_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI64_0) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, 
e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfredmax.vs v8, v8, v12 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1022,13 +1022,13 @@ define double @vreduce_fmax_nxv16f64( %v) { ; CHECK-LABEL: vreduce_fmax_nxv16f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfmax.vv v8, v8, v16 ; CHECK-NEXT: lui a0, %hi(.LCPI65_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI65_0) -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v16, (a0), zero -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfredmax.vs v8, v8, v16 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1039,9 +1039,9 @@ define float @vreduce_nsz_fadd_nxv1f32( %v, float %s) { ; CHECK-LABEL: vreduce_nsz_fadd_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1062,13 +1062,13 @@ ; CHECK-NEXT: add a0, a1, a0 ; CHECK-NEXT: fmv.h.x ft0, zero ; CHECK-NEXT: fneg.h ft0, ft0 -; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma ; CHECK-NEXT: vfmv.v.f v9, ft0 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a1 -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1086,13 +1086,13 @@ ; CHECK-NEXT: add a1, a0, a0 ; CHECK-NEXT: fmv.h.x ft0, zero ; CHECK-NEXT: fneg.h ft0, ft0 -; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma ; CHECK-NEXT: vfmv.v.f v10, ft0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; CHECK-NEXT: vslideup.vx v9, v10, a0 -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v8, v10 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1110,17 +1110,17 @@ ; CHECK-NEXT: add a1, a0, a0 ; CHECK-NEXT: fmv.h.x ft0, zero ; CHECK-NEXT: fneg.h ft0, ft0 -; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma ; CHECK-NEXT: vfmv.v.f v12, ft0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; CHECK-NEXT: vslideup.vx v10, v12, a0 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vslideup.vi v11, v12, 0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; CHECK-NEXT: vslideup.vx v11, v12, a0 -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v12, fa0 -; CHECK-NEXT: vsetvli a0, 
zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v8, v12 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1133,13 +1133,13 @@ define half @vreduce_ord_fadd_nxv12f16( %v, half %s) { ; CHECK-LABEL: vreduce_ord_fadd_nxv12f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v12, fa0 ; CHECK-NEXT: fmv.h.x ft0, zero ; CHECK-NEXT: fneg.h ft0, ft0 -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfmv.v.f v11, ft0 -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfredosum.vs v8, v8, v12 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1158,13 +1158,13 @@ ; CHECK-NEXT: add a0, a1, a0 ; CHECK-NEXT: fmv.h.x ft0, zero ; CHECK-NEXT: fneg.h ft0, ft0 -; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma ; CHECK-NEXT: vfmv.v.f v9, ft0 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a1 -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v8, v9 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1180,13 +1180,13 @@ ; CHECK-NEXT: add a1, a0, a0 ; CHECK-NEXT: fmv.h.x ft0, zero ; CHECK-NEXT: fneg.h ft0, ft0 -; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma ; CHECK-NEXT: vfmv.v.f v10, ft0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; CHECK-NEXT: vslideup.vx v9, v10, a0 -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfredusum.vs v8, v8, v10 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1204,17 +1204,17 @@ ; CHECK-NEXT: flh ft0, %lo(.LCPI73_0)(a1) ; CHECK-NEXT: srli a0, a0, 2 ; CHECK-NEXT: add a1, a0, a0 -; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma ; CHECK-NEXT: vfmv.v.f v12, ft0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; CHECK-NEXT: vslideup.vx v10, v12, a0 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vslideup.vi v11, v12, 0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; CHECK-NEXT: vslideup.vx v11, v12, a0 -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v12, ft0 -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfredmin.vs v8, v8, v12 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret @@ -1229,11 +1229,11 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, %hi(.LCPI74_0) ; CHECK-NEXT: flh ft0, %lo(.LCPI74_0)(a0) -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v12, ft0 -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfmv.v.f v11, 
ft0 -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfredmax.vs v8, v8, v12 ; CHECK-NEXT: vfmv.f.s fa0, v8 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll @@ -9,9 +9,9 @@ define half @vpreduce_fadd_nxv1f16(half %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 ; CHECK-NEXT: ret @@ -22,9 +22,9 @@ define half @vpreduce_ord_fadd_nxv1f16(half %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_ord_fadd_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 ; CHECK-NEXT: ret @@ -37,9 +37,9 @@ define half @vpreduce_fadd_nxv2f16(half %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 ; CHECK-NEXT: ret @@ -50,9 +50,9 @@ define half @vpreduce_ord_fadd_nxv2f16(half %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_ord_fadd_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 ; CHECK-NEXT: ret @@ -65,9 +65,9 @@ define half @vpreduce_fadd_nxv4f16(half %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 ; CHECK-NEXT: ret @@ -78,9 +78,9 @@ define half @vpreduce_ord_fadd_nxv4f16(half %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_ord_fadd_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 ; CHECK-NEXT: ret @@ -95,7 +95,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: srli a1, a2, 1 -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: slli a2, a2, 2 ; CHECK-NEXT: vfmv.s.f v25, fa0 ; CHECK-NEXT: mv a3, a0 @@ -104,19 +104,19 @@ ; CHECK-NEXT: mv 
a3, a2 ; CHECK-NEXT: .LBB6_2: ; CHECK-NEXT: li a4, 0 -; CHECK-NEXT: vsetvli a5, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a5, zero, e8, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v24, v0, a1 -; CHECK-NEXT: vsetvli zero, a3, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a3, e16, m8, tu, ma ; CHECK-NEXT: vfredusum.vs v25, v8, v25, v0.t ; CHECK-NEXT: vfmv.f.s ft0, v25 -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: sub a1, a0, a2 ; CHECK-NEXT: vfmv.s.f v8, ft0 ; CHECK-NEXT: bltu a0, a1, .LBB6_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: mv a4, a1 ; CHECK-NEXT: .LBB6_4: -; CHECK-NEXT: vsetvli zero, a4, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a4, e16, m8, tu, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vfredusum.vs v8, v16, v8, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v8 @@ -130,7 +130,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: srli a1, a2, 1 -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: slli a2, a2, 2 ; CHECK-NEXT: vfmv.s.f v25, fa0 ; CHECK-NEXT: mv a3, a0 @@ -139,19 +139,19 @@ ; CHECK-NEXT: mv a3, a2 ; CHECK-NEXT: .LBB7_2: ; CHECK-NEXT: li a4, 0 -; CHECK-NEXT: vsetvli a5, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a5, zero, e8, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v24, v0, a1 -; CHECK-NEXT: vsetvli zero, a3, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a3, e16, m8, tu, ma ; CHECK-NEXT: vfredosum.vs v25, v8, v25, v0.t ; CHECK-NEXT: vfmv.f.s ft0, v25 -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: sub a1, a0, a2 ; CHECK-NEXT: vfmv.s.f v8, ft0 ; CHECK-NEXT: bltu a0, a1, .LBB7_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: mv a4, a1 ; CHECK-NEXT: .LBB7_4: -; CHECK-NEXT: vsetvli zero, a4, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a4, e16, m8, tu, ma ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: vfredosum.vs v8, v16, v8, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v8 @@ -165,9 +165,9 @@ define float @vpreduce_fadd_nxv1f32(float %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 ; CHECK-NEXT: ret @@ -178,9 +178,9 @@ define float @vpreduce_ord_fadd_nxv1f32(float %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_ord_fadd_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 ; CHECK-NEXT: ret @@ -193,9 +193,9 @@ define float @vpreduce_fadd_nxv2f32(float %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 ; CHECK-NEXT: ret @@ -206,9 +206,9 @@ define float @vpreduce_ord_fadd_nxv2f32(float %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_ord_fadd_nxv2f32: ; CHECK: # 
%bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 ; CHECK-NEXT: ret @@ -221,9 +221,9 @@ define float @vpreduce_fadd_nxv4f32(float %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfredusum.vs v10, v8, v10, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v10 ; CHECK-NEXT: ret @@ -234,9 +234,9 @@ define float @vpreduce_ord_fadd_nxv4f32(float %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_ord_fadd_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vfredosum.vs v10, v8, v10, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v10 ; CHECK-NEXT: ret @@ -249,9 +249,9 @@ define double @vpreduce_fadd_nxv1f64(double %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 ; CHECK-NEXT: ret @@ -262,9 +262,9 @@ define double @vpreduce_ord_fadd_nxv1f64(double %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_ord_fadd_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v9 ; CHECK-NEXT: ret @@ -277,9 +277,9 @@ define double @vpreduce_fadd_nxv2f64(double %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfredusum.vs v10, v8, v10, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v10 ; CHECK-NEXT: ret @@ -290,9 +290,9 @@ define double @vpreduce_ord_fadd_nxv2f64(double %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_ord_fadd_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v10, fa0 -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vfredosum.vs v10, v8, v10, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v10 ; CHECK-NEXT: ret @@ -305,9 +305,9 @@ define double @vpreduce_fadd_nxv3f64(double %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_nxv3f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v12, fa0 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, 
a0, e64, m4, tu, ma ; CHECK-NEXT: vfredusum.vs v12, v8, v12, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v12 ; CHECK-NEXT: ret @@ -318,9 +318,9 @@ define double @vpreduce_ord_fadd_nxv3f64(double %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_ord_fadd_nxv3f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v12, fa0 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfredosum.vs v12, v8, v12, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v12 ; CHECK-NEXT: ret @@ -333,9 +333,9 @@ define double @vpreduce_fadd_nxv4f64(double %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v12, fa0 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfredusum.vs v12, v8, v12, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v12 ; CHECK-NEXT: ret @@ -346,9 +346,9 @@ define double @vpreduce_ord_fadd_nxv4f64(double %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_ord_fadd_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfmv.s.f v12, fa0 -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vfredosum.vs v12, v8, v12, v0.t ; CHECK-NEXT: vfmv.f.s fa0, v12 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll @@ -9,9 +9,9 @@ define signext i8 @vpreduce_add_nxv1i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -25,9 +25,9 @@ ; CHECK-LABEL: vpreduce_umax_nxv1i8: ; CHECK: # %bb.0: ; CHECK-NEXT: andi a0, a0, 255 -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -40,9 +40,9 @@ define signext i8 @vpreduce_smax_nxv1i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -56,9 +56,9 @@ ; CHECK-LABEL: vpreduce_umin_nxv1i8: ; CHECK: # %bb.0: ; CHECK-NEXT: andi a0, a0, 255 -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vredminu.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret 
@@ -71,9 +71,9 @@ define signext i8 @vpreduce_smin_nxv1i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -86,9 +86,9 @@ define signext i8 @vpreduce_and_nxv1i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -101,9 +101,9 @@ define signext i8 @vpreduce_or_nxv1i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -116,9 +116,9 @@ define signext i8 @vpreduce_xor_nxv1i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -131,9 +131,9 @@ define signext i8 @vpreduce_add_nxv2i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -147,9 +147,9 @@ ; CHECK-LABEL: vpreduce_umax_nxv2i8: ; CHECK: # %bb.0: ; CHECK-NEXT: andi a0, a0, 255 -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma ; CHECK-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -162,9 +162,9 @@ define signext i8 @vpreduce_smax_nxv2i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -178,9 +178,9 @@ ; CHECK-LABEL: vpreduce_umin_nxv2i8: ; CHECK: # %bb.0: ; CHECK-NEXT: andi a0, a0, 255 -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma ; 
CHECK-NEXT: vredminu.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -193,9 +193,9 @@ define signext i8 @vpreduce_smin_nxv2i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -208,9 +208,9 @@ define signext i8 @vpreduce_and_nxv2i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -223,9 +223,9 @@ define signext i8 @vpreduce_or_nxv2i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -238,9 +238,9 @@ define signext i8 @vpreduce_xor_nxv2i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -253,9 +253,9 @@ define signext i8 @vpreduce_smax_nxv3i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv3i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -268,9 +268,9 @@ define signext i8 @vpreduce_add_nxv4i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -284,9 +284,9 @@ ; CHECK-LABEL: vpreduce_umax_nxv4i8: ; CHECK: # %bb.0: ; CHECK-NEXT: andi a0, a0, 255 -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma ; CHECK-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -299,9 +299,9 @@ define signext i8 @vpreduce_smax_nxv4i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, 
e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -315,9 +315,9 @@ ; CHECK-LABEL: vpreduce_umin_nxv4i8: ; CHECK: # %bb.0: ; CHECK-NEXT: andi a0, a0, 255 -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma ; CHECK-NEXT: vredminu.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -330,9 +330,9 @@ define signext i8 @vpreduce_smin_nxv4i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -345,9 +345,9 @@ define signext i8 @vpreduce_and_nxv4i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -360,9 +360,9 @@ define signext i8 @vpreduce_or_nxv4i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -375,9 +375,9 @@ define signext i8 @vpreduce_xor_nxv4i8(i8 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -390,9 +390,9 @@ define signext i16 @vpreduce_add_nxv1i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -407,9 +407,9 @@ ; RV32: # %bb.0: ; RV32-NEXT: slli a0, a0, 16 ; RV32-NEXT: srli a0, a0, 16 -; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, a0 -; RV32-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; RV32-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: ret @@ -418,9 +418,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: slli a0, a0, 48 ; RV64-NEXT: srli a0, a0, 48 -; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; 
RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; RV64-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -433,9 +433,9 @@ define signext i16 @vpreduce_smax_nxv1i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -450,9 +450,9 @@ ; RV32: # %bb.0: ; RV32-NEXT: slli a0, a0, 16 ; RV32-NEXT: srli a0, a0, 16 -; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, a0 -; RV32-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; RV32-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: ret @@ -461,9 +461,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: slli a0, a0, 48 ; RV64-NEXT: srli a0, a0, 48 -; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; RV64-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -476,9 +476,9 @@ define signext i16 @vpreduce_smin_nxv1i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -491,9 +491,9 @@ define signext i16 @vpreduce_and_nxv1i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -506,9 +506,9 @@ define signext i16 @vpreduce_or_nxv1i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -521,9 +521,9 @@ define signext i16 @vpreduce_xor_nxv1i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -536,9 +536,9 @@ define signext i16 @vpreduce_add_nxv2i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; 
CHECK-LABEL: vpreduce_add_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -553,9 +553,9 @@ ; RV32: # %bb.0: ; RV32-NEXT: slli a0, a0, 16 ; RV32-NEXT: srli a0, a0, 16 -; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, a0 -; RV32-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; RV32-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: ret @@ -564,9 +564,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: slli a0, a0, 48 ; RV64-NEXT: srli a0, a0, 48 -; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; RV64-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -579,9 +579,9 @@ define signext i16 @vpreduce_smax_nxv2i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -596,9 +596,9 @@ ; RV32: # %bb.0: ; RV32-NEXT: slli a0, a0, 16 ; RV32-NEXT: srli a0, a0, 16 -; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, a0 -; RV32-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; RV32-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: ret @@ -607,9 +607,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: slli a0, a0, 48 ; RV64-NEXT: srli a0, a0, 48 -; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; RV64-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -622,9 +622,9 @@ define signext i16 @vpreduce_smin_nxv2i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -637,9 +637,9 @@ define signext i16 @vpreduce_and_nxv2i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -652,9 +652,9 @@ define signext i16 @vpreduce_or_nxv2i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: 
vpreduce_or_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -667,9 +667,9 @@ define signext i16 @vpreduce_xor_nxv2i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -682,9 +682,9 @@ define signext i16 @vpreduce_add_nxv4i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -699,9 +699,9 @@ ; RV32: # %bb.0: ; RV32-NEXT: slli a0, a0, 16 ; RV32-NEXT: srli a0, a0, 16 -; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, a0 -; RV32-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; RV32-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: ret @@ -710,9 +710,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: slli a0, a0, 48 ; RV64-NEXT: srli a0, a0, 48 -; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; RV64-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -725,9 +725,9 @@ define signext i16 @vpreduce_smax_nxv4i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -742,9 +742,9 @@ ; RV32: # %bb.0: ; RV32-NEXT: slli a0, a0, 16 ; RV32-NEXT: srli a0, a0, 16 -; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, a0 -; RV32-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; RV32-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: ret @@ -753,9 +753,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: slli a0, a0, 48 ; RV64-NEXT: srli a0, a0, 48 -; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; RV64-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -768,9 +768,9 @@ define signext i16 @vpreduce_smin_nxv4i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv4i16: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -783,9 +783,9 @@ define signext i16 @vpreduce_and_nxv4i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -798,9 +798,9 @@ define signext i16 @vpreduce_or_nxv4i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -813,9 +813,9 @@ define signext i16 @vpreduce_xor_nxv4i16(i16 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -828,9 +828,9 @@ define signext i32 @vpreduce_add_nxv1i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -843,9 +843,9 @@ define signext i32 @vpreduce_umax_nxv1i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_umax_nxv1i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, a0 -; RV32-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; RV32-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: ret @@ -854,9 +854,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: srli a0, a0, 32 -; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; RV64-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -869,9 +869,9 @@ define signext i32 @vpreduce_smax_nxv1i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: 
ret @@ -884,9 +884,9 @@ define signext i32 @vpreduce_umin_nxv1i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_umin_nxv1i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, a0 -; RV32-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; RV32-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: ret @@ -895,9 +895,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: srli a0, a0, 32 -; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; RV64-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -910,9 +910,9 @@ define signext i32 @vpreduce_smin_nxv1i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -925,9 +925,9 @@ define signext i32 @vpreduce_and_nxv1i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -940,9 +940,9 @@ define signext i32 @vpreduce_or_nxv1i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -955,9 +955,9 @@ define signext i32 @vpreduce_xor_nxv1i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -970,9 +970,9 @@ define signext i32 @vpreduce_add_nxv2i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; CHECK-NEXT: vredsum.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -985,9 +985,9 @@ define signext i32 @vpreduce_umax_nxv2i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_umax_nxv2i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, a0 -; RV32-NEXT: 
vsetvli zero, a1, e32, m1, tu, mu +; RV32-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: ret @@ -996,9 +996,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: srli a0, a0, 32 -; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; RV64-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -1011,9 +1011,9 @@ define signext i32 @vpreduce_smax_nxv2i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; CHECK-NEXT: vredmax.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -1026,9 +1026,9 @@ define signext i32 @vpreduce_umin_nxv2i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_umin_nxv2i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, a0 -; RV32-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; RV32-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: ret @@ -1037,9 +1037,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: srli a0, a0, 32 -; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; RV64-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -1052,9 +1052,9 @@ define signext i32 @vpreduce_smin_nxv2i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; CHECK-NEXT: vredmin.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -1067,9 +1067,9 @@ define signext i32 @vpreduce_and_nxv2i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; CHECK-NEXT: vredand.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -1082,9 +1082,9 @@ define signext i32 @vpreduce_or_nxv2i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; CHECK-NEXT: vredor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -1097,9 +1097,9 @@ define signext i32 @vpreduce_xor_nxv2i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, 
ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; CHECK-NEXT: vredxor.vs v9, v8, v9, v0.t ; CHECK-NEXT: vmv.x.s a0, v9 ; CHECK-NEXT: ret @@ -1112,9 +1112,9 @@ define signext i32 @vpreduce_add_nxv4i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_add_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; CHECK-NEXT: vredsum.vs v10, v8, v10, v0.t ; CHECK-NEXT: vmv.x.s a0, v10 ; CHECK-NEXT: ret @@ -1127,9 +1127,9 @@ define signext i32 @vpreduce_umax_nxv4i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_umax_nxv4i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32-NEXT: vmv.s.x v10, a0 -; RV32-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; RV32-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; RV32-NEXT: vredmaxu.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 ; RV32-NEXT: ret @@ -1138,9 +1138,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: srli a0, a0, 32 -; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, a0 -; RV64-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; RV64-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; RV64-NEXT: vredmaxu.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 ; RV64-NEXT: ret @@ -1155,7 +1155,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: csrr a3, vlenb ; RV32-NEXT: srli a2, a3, 2 -; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32-NEXT: slli a3, a3, 1 ; RV32-NEXT: vmv.s.x v25, a0 ; RV32-NEXT: mv a0, a1 @@ -1164,19 +1164,19 @@ ; RV32-NEXT: mv a0, a3 ; RV32-NEXT: .LBB67_2: ; RV32-NEXT: li a4, 0 -; RV32-NEXT: vsetvli a5, zero, e8, mf2, ta, mu +; RV32-NEXT: vsetvli a5, zero, e8, mf2, ta, ma ; RV32-NEXT: vslidedown.vx v24, v0, a2 -; RV32-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; RV32-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; RV32-NEXT: vredmaxu.vs v25, v8, v25, v0.t ; RV32-NEXT: vmv.x.s a2, v25 -; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32-NEXT: sub a0, a1, a3 ; RV32-NEXT: vmv.s.x v8, a2 ; RV32-NEXT: bltu a1, a0, .LBB67_4 ; RV32-NEXT: # %bb.3: ; RV32-NEXT: mv a4, a0 ; RV32-NEXT: .LBB67_4: -; RV32-NEXT: vsetvli zero, a4, e32, m8, tu, mu +; RV32-NEXT: vsetvli zero, a4, e32, m8, tu, ma ; RV32-NEXT: vmv1r.v v0, v24 ; RV32-NEXT: vredmaxu.vs v8, v16, v8, v0.t ; RV32-NEXT: vmv.x.s a0, v8 @@ -1195,21 +1195,21 @@ ; RV64-NEXT: mv a4, a0 ; RV64-NEXT: .LBB67_2: ; RV64-NEXT: li a5, 0 -; RV64-NEXT: vsetvli a6, zero, e8, mf2, ta, mu +; RV64-NEXT: vsetvli a6, zero, e8, mf2, ta, ma ; RV64-NEXT: vslidedown.vx v24, v0, a2 -; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64-NEXT: vmv.s.x v25, a3 -; RV64-NEXT: vsetvli zero, a4, e32, m8, tu, mu +; RV64-NEXT: vsetvli zero, a4, e32, m8, tu, ma ; RV64-NEXT: vredmaxu.vs v25, v8, v25, v0.t ; RV64-NEXT: vmv.x.s a2, v25 -; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64-NEXT: sub a0, a1, a0 ; RV64-NEXT: vmv.s.x v8, a2 ; RV64-NEXT: bltu a1, a0, .LBB67_4 ; RV64-NEXT: # %bb.3: ; RV64-NEXT: mv a5, a0 ; RV64-NEXT: .LBB67_4: -; RV64-NEXT: vsetvli zero, 
a5, e32, m8, tu, mu +; RV64-NEXT: vsetvli zero, a5, e32, m8, tu, ma ; RV64-NEXT: vmv1r.v v0, v24 ; RV64-NEXT: vredmaxu.vs v8, v16, v8, v0.t ; RV64-NEXT: vmv.x.s a0, v8 @@ -1223,9 +1223,9 @@ define signext i32 @vpreduce_smax_nxv4i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; CHECK-NEXT: vredmax.vs v10, v8, v10, v0.t ; CHECK-NEXT: vmv.x.s a0, v10 ; CHECK-NEXT: ret @@ -1238,9 +1238,9 @@ define signext i32 @vpreduce_umin_nxv4i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_umin_nxv4i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32-NEXT: vmv.s.x v10, a0 -; RV32-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; RV32-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; RV32-NEXT: vredminu.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 ; RV32-NEXT: ret @@ -1249,9 +1249,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: slli a0, a0, 32 ; RV64-NEXT: srli a0, a0, 32 -; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, a0 -; RV64-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; RV64-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; RV64-NEXT: vredminu.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 ; RV64-NEXT: ret @@ -1264,9 +1264,9 @@ define signext i32 @vpreduce_smin_nxv4i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smin_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; CHECK-NEXT: vredmin.vs v10, v8, v10, v0.t ; CHECK-NEXT: vmv.x.s a0, v10 ; CHECK-NEXT: ret @@ -1279,9 +1279,9 @@ define signext i32 @vpreduce_and_nxv4i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; CHECK-NEXT: vredand.vs v10, v8, v10, v0.t ; CHECK-NEXT: vmv.x.s a0, v10 ; CHECK-NEXT: ret @@ -1294,9 +1294,9 @@ define signext i32 @vpreduce_or_nxv4i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_or_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; CHECK-NEXT: vredor.vs v10, v8, v10, v0.t ; CHECK-NEXT: vmv.x.s a0, v10 ; CHECK-NEXT: ret @@ -1309,9 +1309,9 @@ define signext i32 @vpreduce_xor_nxv4i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_xor_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; CHECK-NEXT: vredxor.vs v10, v8, v10, v0.t ; CHECK-NEXT: vmv.x.s a0, v10 ; CHECK-NEXT: ret @@ -1329,13 +1329,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, ma ; RV32-NEXT: vredsum.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1343,9 +1343,9 @@ ; ; RV64-LABEL: vpreduce_add_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma ; RV64-NEXT: vredsum.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -1361,14 +1361,14 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e32, mf2, tu, mu +; RV32-NEXT: vsetvli zero, a2, e32, mf2, tu, ma ; RV32-NEXT: vwredsum.vs v9, v8, v9, v0.t -; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1376,11 +1376,11 @@ ; ; RV64-LABEL: vpwreduce_add_nxv1i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; RV64-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; RV64-NEXT: vwredsum.vs v9, v8, v9, v0.t -; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret %e = sext %v to @@ -1396,14 +1396,14 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e32, mf2, tu, mu +; RV32-NEXT: vsetvli zero, a2, e32, mf2, tu, ma ; RV32-NEXT: vwredsum.vs v9, v8, v9, v0.t -; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1411,11 +1411,11 @@ ; ; RV64-LABEL: vpwreduce_uadd_nxv1i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; RV64-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; RV64-NEXT: vwredsum.vs v9, v8, v9, v0.t -; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret %e = sext %v to @@ -1433,13 +1433,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, 
(a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, ma ; RV32-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1447,9 +1447,9 @@ ; ; RV64-LABEL: vpreduce_umax_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma ; RV64-NEXT: vredmaxu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -1467,13 +1467,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, ma ; RV32-NEXT: vredmax.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1481,9 +1481,9 @@ ; ; RV64-LABEL: vpreduce_smax_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma ; RV64-NEXT: vredmax.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -1501,13 +1501,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, ma ; RV32-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1515,9 +1515,9 @@ ; ; RV64-LABEL: vpreduce_umin_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma ; RV64-NEXT: vredminu.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -1535,13 +1535,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, ma ; RV32-NEXT: vredmin.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1549,9 +1549,9 @@ ; ; RV64-LABEL: vpreduce_smin_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; 
RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma ; RV64-NEXT: vredmin.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -1569,13 +1569,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, ma ; RV32-NEXT: vredand.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1583,9 +1583,9 @@ ; ; RV64-LABEL: vpreduce_and_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma ; RV64-NEXT: vredand.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -1603,13 +1603,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, ma ; RV32-NEXT: vredor.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1617,9 +1617,9 @@ ; ; RV64-LABEL: vpreduce_or_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma ; RV64-NEXT: vredor.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -1637,13 +1637,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, ma ; RV32-NEXT: vredxor.vs v9, v8, v9, v0.t ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1651,9 +1651,9 @@ ; ; RV64-LABEL: vpreduce_xor_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma ; RV64-NEXT: vredxor.vs v9, v8, v9, v0.t ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret @@ -1671,13 +1671,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, ma ; RV32-NEXT: vredsum.vs v10, v8, 
v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1685,9 +1685,9 @@ ; ; RV64-LABEL: vpreduce_add_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, ma ; RV64-NEXT: vredsum.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 ; RV64-NEXT: ret @@ -1703,14 +1703,14 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e32, m1, tu, mu +; RV32-NEXT: vsetvli zero, a2, e32, m1, tu, ma ; RV32-NEXT: vwredsum.vs v9, v8, v9, v0.t -; RV32-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1718,11 +1718,11 @@ ; ; RV64-LABEL: vwpreduce_add_nxv2i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; RV64-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; RV64-NEXT: vwredsum.vs v9, v8, v9, v0.t -; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret %e = sext %v to @@ -1738,14 +1738,14 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e32, m1, tu, mu +; RV32-NEXT: vsetvli zero, a2, e32, m1, tu, ma ; RV32-NEXT: vwredsum.vs v9, v8, v9, v0.t -; RV32-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV32-NEXT: vmv.x.s a0, v9 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v9, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1753,11 +1753,11 @@ ; ; RV64-LABEL: vwpreduce_uadd_nxv2i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; RV64-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; RV64-NEXT: vwredsum.vs v9, v8, v9, v0.t -; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV64-NEXT: vmv.x.s a0, v9 ; RV64-NEXT: ret %e = sext %v to @@ -1775,13 +1775,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, ma ; RV32-NEXT: vredmaxu.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli 
zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1789,9 +1789,9 @@ ; ; RV64-LABEL: vpreduce_umax_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, ma ; RV64-NEXT: vredmaxu.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 ; RV64-NEXT: ret @@ -1809,13 +1809,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, ma ; RV32-NEXT: vredmax.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1823,9 +1823,9 @@ ; ; RV64-LABEL: vpreduce_smax_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, ma ; RV64-NEXT: vredmax.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 ; RV64-NEXT: ret @@ -1843,13 +1843,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, ma ; RV32-NEXT: vredminu.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1857,9 +1857,9 @@ ; ; RV64-LABEL: vpreduce_umin_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, ma ; RV64-NEXT: vredminu.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 ; RV64-NEXT: ret @@ -1877,13 +1877,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, ma ; RV32-NEXT: vredmin.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1891,9 +1891,9 @@ ; ; RV64-LABEL: vpreduce_smin_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, ma ; RV64-NEXT: vredmin.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 ; RV64-NEXT: ret @@ -1911,13 +1911,13 @@ ; RV32-NEXT: 
sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, ma ; RV32-NEXT: vredand.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1925,9 +1925,9 @@ ; ; RV64-LABEL: vpreduce_and_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, ma ; RV64-NEXT: vredand.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 ; RV64-NEXT: ret @@ -1945,13 +1945,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, ma ; RV32-NEXT: vredor.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1959,9 +1959,9 @@ ; ; RV64-LABEL: vpreduce_or_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, ma ; RV64-NEXT: vredor.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 ; RV64-NEXT: ret @@ -1979,13 +1979,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, ma ; RV32-NEXT: vredxor.vs v10, v8, v10, v0.t ; RV32-NEXT: vmv.x.s a0, v10 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1993,9 +1993,9 @@ ; ; RV64-LABEL: vpreduce_xor_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, ma ; RV64-NEXT: vredxor.vs v10, v8, v10, v0.t ; RV64-NEXT: vmv.x.s a0, v10 ; RV64-NEXT: ret @@ -2013,13 +2013,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, ma ; RV32-NEXT: vredsum.vs v12, v8, v12, v0.t ; RV32-NEXT: vmv.x.s a0, v12 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v12, a1 ; RV32-NEXT: vmv.x.s a1, 
v8 ; RV32-NEXT: addi sp, sp, 16 @@ -2027,9 +2027,9 @@ ; ; RV64-LABEL: vpreduce_add_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v12, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, ma ; RV64-NEXT: vredsum.vs v12, v8, v12, v0.t ; RV64-NEXT: vmv.x.s a0, v12 ; RV64-NEXT: ret @@ -2045,14 +2045,14 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e32, m2, tu, mu +; RV32-NEXT: vsetvli zero, a2, e32, m2, tu, ma ; RV32-NEXT: vwredsum.vs v10, v8, v10, v0.t -; RV32-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV32-NEXT: vmv.x.s a0, v10 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -2060,11 +2060,11 @@ ; ; RV64-LABEL: vpwreduce_add_nxv4i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, a0 -; RV64-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; RV64-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; RV64-NEXT: vwredsum.vs v10, v8, v10, v0.t -; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV64-NEXT: vmv.x.s a0, v10 ; RV64-NEXT: ret %e = sext %v to @@ -2080,14 +2080,14 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e32, m2, tu, mu +; RV32-NEXT: vsetvli zero, a2, e32, m2, tu, ma ; RV32-NEXT: vwredsumu.vs v10, v8, v10, v0.t -; RV32-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV32-NEXT: vmv.x.s a0, v10 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v10, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -2095,11 +2095,11 @@ ; ; RV64-LABEL: vpwreduce_uadd_nxv4i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, a0 -; RV64-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; RV64-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; RV64-NEXT: vwredsumu.vs v10, v8, v10, v0.t -; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV64-NEXT: vmv.x.s a0, v10 ; RV64-NEXT: ret %e = zext %v to @@ -2117,13 +2117,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, ma ; RV32-NEXT: vredmaxu.vs v12, v8, v12, v0.t ; RV32-NEXT: vmv.x.s a0, v12 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v12, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -2131,9 +2131,9 @@ ; ; RV64-LABEL: vpreduce_umax_nxv4i64: ; RV64: # %bb.0: -; 
RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v12, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, ma ; RV64-NEXT: vredmaxu.vs v12, v8, v12, v0.t ; RV64-NEXT: vmv.x.s a0, v12 ; RV64-NEXT: ret @@ -2151,13 +2151,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, ma ; RV32-NEXT: vredmax.vs v12, v8, v12, v0.t ; RV32-NEXT: vmv.x.s a0, v12 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v12, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -2165,9 +2165,9 @@ ; ; RV64-LABEL: vpreduce_smax_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v12, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, ma ; RV64-NEXT: vredmax.vs v12, v8, v12, v0.t ; RV64-NEXT: vmv.x.s a0, v12 ; RV64-NEXT: ret @@ -2185,13 +2185,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, ma ; RV32-NEXT: vredminu.vs v12, v8, v12, v0.t ; RV32-NEXT: vmv.x.s a0, v12 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v12, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -2199,9 +2199,9 @@ ; ; RV64-LABEL: vpreduce_umin_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v12, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, ma ; RV64-NEXT: vredminu.vs v12, v8, v12, v0.t ; RV64-NEXT: vmv.x.s a0, v12 ; RV64-NEXT: ret @@ -2219,13 +2219,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, ma ; RV32-NEXT: vredmin.vs v12, v8, v12, v0.t ; RV32-NEXT: vmv.x.s a0, v12 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v12, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -2233,9 +2233,9 @@ ; ; RV64-LABEL: vpreduce_smin_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v12, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, ma ; RV64-NEXT: vredmin.vs v12, v8, v12, v0.t ; RV64-NEXT: vmv.x.s a0, v12 ; RV64-NEXT: ret @@ -2253,13 +2253,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v12, 
(a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, ma ; RV32-NEXT: vredand.vs v12, v8, v12, v0.t ; RV32-NEXT: vmv.x.s a0, v12 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v12, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -2267,9 +2267,9 @@ ; ; RV64-LABEL: vpreduce_and_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v12, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, ma ; RV64-NEXT: vredand.vs v12, v8, v12, v0.t ; RV64-NEXT: vmv.x.s a0, v12 ; RV64-NEXT: ret @@ -2287,13 +2287,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, ma ; RV32-NEXT: vredor.vs v12, v8, v12, v0.t ; RV32-NEXT: vmv.x.s a0, v12 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v12, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -2301,9 +2301,9 @@ ; ; RV64-LABEL: vpreduce_or_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v12, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, ma ; RV64-NEXT: vredor.vs v12, v8, v12, v0.t ; RV64-NEXT: vmv.x.s a0, v12 ; RV64-NEXT: ret @@ -2321,13 +2321,13 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, ma ; RV32-NEXT: vredxor.vs v12, v8, v12, v0.t ; RV32-NEXT: vmv.x.s a0, v12 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v12, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -2335,9 +2335,9 @@ ; ; RV64-LABEL: vpreduce_xor_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v12, a0 -; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, ma ; RV64-NEXT: vredxor.vs v12, v8, v12, v0.t ; RV64-NEXT: vmv.x.s a0, v12 ; RV64-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int.ll --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int.ll @@ -9,9 +9,9 @@ define signext i8 @vreduce_add_nxv1i8( %v) { ; CHECK-LABEL: vreduce_add_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -24,9 +24,9 @@ define signext i8 @vreduce_umax_nxv1i8( %v) { ; CHECK-LABEL: vreduce_umax_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: 
vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -40,9 +40,9 @@ ; CHECK-LABEL: vreduce_smax_nxv1i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -128 -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vredmax.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -55,9 +55,9 @@ define signext i8 @vreduce_umin_nxv1i8( %v) { ; CHECK-LABEL: vreduce_umin_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, -1 -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -71,9 +71,9 @@ ; CHECK-LABEL: vreduce_smin_nxv1i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 127 -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vredmin.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -86,9 +86,9 @@ define signext i8 @vreduce_and_nxv1i8( %v) { ; CHECK-LABEL: vreduce_and_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, -1 -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -101,9 +101,9 @@ define signext i8 @vreduce_or_nxv1i8( %v) { ; CHECK-LABEL: vreduce_or_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -116,9 +116,9 @@ define signext i8 @vreduce_xor_nxv1i8( %v) { ; CHECK-LABEL: vreduce_xor_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -131,9 +131,9 @@ define signext i8 @vreduce_add_nxv2i8( %v) { ; CHECK-LABEL: vreduce_add_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -146,9 +146,9 @@ define signext i8 @vreduce_umax_nxv2i8( %v) { ; CHECK-LABEL: vreduce_umax_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, 
e8, mf4, ta, ma ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -162,9 +162,9 @@ ; CHECK-LABEL: vreduce_smax_nxv2i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, -128 -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vredmax.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -177,9 +177,9 @@ define signext i8 @vreduce_umin_nxv2i8( %v) { ; CHECK-LABEL: vreduce_umin_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, -1 -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -193,9 +193,9 @@ ; CHECK-LABEL: vreduce_smin_nxv2i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 127 -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vredmin.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -208,9 +208,9 @@ define signext i8 @vreduce_and_nxv2i8( %v) { ; CHECK-LABEL: vreduce_and_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, -1 -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -223,9 +223,9 @@ define signext i8 @vreduce_or_nxv2i8( %v) { ; CHECK-LABEL: vreduce_or_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -238,9 +238,9 @@ define signext i8 @vreduce_xor_nxv2i8( %v) { ; CHECK-LABEL: vreduce_xor_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -253,9 +253,9 @@ define signext i8 @vreduce_add_nxv4i8( %v) { ; CHECK-LABEL: vreduce_add_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -268,9 +268,9 @@ define signext i8 @vreduce_umax_nxv4i8( %v) { ; CHECK-LABEL: vreduce_umax_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -284,9 +284,9 @@ ; CHECK-LABEL: vreduce_smax_nxv4i8: ; CHECK: # %bb.0: ; CHECK-NEXT: 
li a0, -128 -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vredmax.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -299,9 +299,9 @@ define signext i8 @vreduce_umin_nxv4i8( %v) { ; CHECK-LABEL: vreduce_umin_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, -1 -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -315,9 +315,9 @@ ; CHECK-LABEL: vreduce_smin_nxv4i8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 127 -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vredmin.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -330,9 +330,9 @@ define signext i8 @vreduce_and_nxv4i8( %v) { ; CHECK-LABEL: vreduce_and_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, -1 -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -345,9 +345,9 @@ define signext i8 @vreduce_or_nxv4i8( %v) { ; CHECK-LABEL: vreduce_or_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -360,9 +360,9 @@ define signext i8 @vreduce_xor_nxv4i8( %v) { ; CHECK-LABEL: vreduce_xor_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -375,9 +375,9 @@ define signext i16 @vreduce_add_nxv1i16( %v) { ; CHECK-LABEL: vreduce_add_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -388,11 +388,11 @@ define signext i16 @vwreduce_add_nxv1i8( %v) { ; CHECK-LABEL: vwreduce_add_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vwredsum.vs v8, v8, v9 -; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %e = sext %v to @@ -403,11 +403,11 @@ define signext i16 @vwreduce_uadd_nxv1i8( %v) { ; CHECK-LABEL: vwreduce_uadd_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli 
zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vwredsum.vs v8, v8, v9 -; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %e = sext %v to @@ -420,9 +420,9 @@ define signext i16 @vreduce_umax_nxv1i16( %v) { ; CHECK-LABEL: vreduce_umax_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -436,9 +436,9 @@ ; CHECK-LABEL: vreduce_smax_nxv1i16: ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, 1048568 -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vredmax.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -451,9 +451,9 @@ define signext i16 @vreduce_umin_nxv1i16( %v) { ; CHECK-LABEL: vreduce_umin_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, -1 -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -468,9 +468,9 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 8 ; RV32-NEXT: addi a0, a0, -1 -; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, a0 -; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; RV32-NEXT: vredmin.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: ret @@ -479,9 +479,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 8 ; RV64-NEXT: addiw a0, a0, -1 -; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; RV64-NEXT: vredmin.vs v8, v8, v9 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -494,9 +494,9 @@ define signext i16 @vreduce_and_nxv1i16( %v) { ; CHECK-LABEL: vreduce_and_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, -1 -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -509,9 +509,9 @@ define signext i16 @vreduce_or_nxv1i16( %v) { ; CHECK-LABEL: vreduce_or_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -524,9 +524,9 @@ define signext i16 @vreduce_xor_nxv1i16( %v) { ; CHECK-LABEL: vreduce_xor_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; 
CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -539,9 +539,9 @@ define signext i16 @vreduce_add_nxv2i16( %v) { ; CHECK-LABEL: vreduce_add_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -552,11 +552,11 @@ define signext i16 @vwreduce_add_nxv2i8( %v) { ; CHECK-LABEL: vwreduce_add_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vwredsum.vs v8, v8, v9 -; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %e = sext %v to @@ -567,11 +567,11 @@ define signext i16 @vwreduce_uadd_nxv2i8( %v) { ; CHECK-LABEL: vwreduce_uadd_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vwredsum.vs v8, v8, v9 -; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %e = sext %v to @@ -584,9 +584,9 @@ define signext i16 @vreduce_umax_nxv2i16( %v) { ; CHECK-LABEL: vreduce_umax_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -600,9 +600,9 @@ ; CHECK-LABEL: vreduce_smax_nxv2i16: ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, 1048568 -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vredmax.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -615,9 +615,9 @@ define signext i16 @vreduce_umin_nxv2i16( %v) { ; CHECK-LABEL: vreduce_umin_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, -1 -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -632,9 +632,9 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 8 ; RV32-NEXT: addi a0, a0, -1 -; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, a0 -; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; RV32-NEXT: vredmin.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: ret @@ -643,9 +643,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 8 ; RV64-NEXT: addiw a0, a0, -1 -; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, 
mu +; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; RV64-NEXT: vredmin.vs v8, v8, v9 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -658,9 +658,9 @@ define signext i16 @vreduce_and_nxv2i16( %v) { ; CHECK-LABEL: vreduce_and_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, -1 -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -673,9 +673,9 @@ define signext i16 @vreduce_or_nxv2i16( %v) { ; CHECK-LABEL: vreduce_or_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -688,9 +688,9 @@ define signext i16 @vreduce_xor_nxv2i16( %v) { ; CHECK-LABEL: vreduce_xor_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -703,9 +703,9 @@ define signext i16 @vreduce_add_nxv4i16( %v) { ; CHECK-LABEL: vreduce_add_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -716,11 +716,11 @@ define signext i16 @vwreduce_add_nxv4i8( %v) { ; CHECK-LABEL: vwreduce_add_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vwredsum.vs v8, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %e = sext %v to @@ -731,11 +731,11 @@ define signext i16 @vwreduce_uadd_nxv4i8( %v) { ; CHECK-LABEL: vwreduce_uadd_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vwredsum.vs v8, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %e = sext %v to @@ -748,9 +748,9 @@ define signext i16 @vreduce_umax_nxv4i16( %v) { ; CHECK-LABEL: vreduce_umax_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -764,9 +764,9 @@ ; 
CHECK-LABEL: vreduce_smax_nxv4i16: ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, 1048568 -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vredmax.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -779,9 +779,9 @@ define signext i16 @vreduce_umin_nxv4i16( %v) { ; CHECK-LABEL: vreduce_umin_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, -1 -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -796,9 +796,9 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 8 ; RV32-NEXT: addi a0, a0, -1 -; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, a0 -; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; RV32-NEXT: vredmin.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: ret @@ -807,9 +807,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 8 ; RV64-NEXT: addiw a0, a0, -1 -; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; RV64-NEXT: vredmin.vs v8, v8, v9 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -822,9 +822,9 @@ define signext i16 @vreduce_and_nxv4i16( %v) { ; CHECK-LABEL: vreduce_and_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, -1 -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -837,9 +837,9 @@ define signext i16 @vreduce_or_nxv4i16( %v) { ; CHECK-LABEL: vreduce_or_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -852,9 +852,9 @@ define signext i16 @vreduce_xor_nxv4i16( %v) { ; CHECK-LABEL: vreduce_xor_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -867,9 +867,9 @@ define signext i32 @vreduce_add_nxv1i32( %v) { ; CHECK-LABEL: vreduce_add_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -880,11 +880,11 @@ define signext i32 @vwreduce_add_nxv1i16( %v) { ; CHECK-LABEL: vwreduce_add_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; 
CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vwredsum.vs v8, v8, v9 -; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %e = sext %v to @@ -895,11 +895,11 @@ define signext i32 @vwreduce_uadd_nxv1i16( %v) { ; CHECK-LABEL: vwreduce_uadd_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vwredsumu.vs v8, v8, v9 -; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %e = zext %v to @@ -912,9 +912,9 @@ define signext i32 @vreduce_umax_nxv1i32( %v) { ; CHECK-LABEL: vreduce_umax_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -928,9 +928,9 @@ ; CHECK-LABEL: vreduce_smax_nxv1i32: ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, 524288 -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vredmax.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -943,9 +943,9 @@ define signext i32 @vreduce_umin_nxv1i32( %v) { ; CHECK-LABEL: vreduce_umin_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, -1 -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -960,9 +960,9 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: addi a0, a0, -1 -; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, a0 -; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; RV32-NEXT: vredmin.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: ret @@ -971,9 +971,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 524288 ; RV64-NEXT: addiw a0, a0, -1 -; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; RV64-NEXT: vredmin.vs v8, v8, v9 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -986,9 +986,9 @@ define signext i32 @vreduce_and_nxv1i32( %v) { ; CHECK-LABEL: vreduce_and_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, -1 -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -1001,9 +1001,9 @@ define signext i32 @vreduce_or_nxv1i32( %v) { ; CHECK-LABEL: vreduce_or_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, 
ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -1016,9 +1016,9 @@ define signext i32 @vreduce_xor_nxv1i32( %v) { ; CHECK-LABEL: vreduce_xor_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -1031,9 +1031,9 @@ define signext i32 @vreduce_add_nxv2i32( %v) { ; CHECK-LABEL: vreduce_add_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vredsum.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -1044,11 +1044,11 @@ define signext i32 @vwreduce_add_nxv2i16( %v) { ; CHECK-LABEL: vwreduce_add_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vwredsum.vs v8, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %e = sext %v to @@ -1059,11 +1059,11 @@ define signext i32 @vwreduce_uadd_nxv2i16( %v) { ; CHECK-LABEL: vwreduce_uadd_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vwredsumu.vs v8, v8, v9 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %e = zext %v to @@ -1076,9 +1076,9 @@ define signext i32 @vreduce_umax_nxv2i32( %v) { ; CHECK-LABEL: vreduce_umax_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vredmaxu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -1092,9 +1092,9 @@ ; CHECK-LABEL: vreduce_smax_nxv2i32: ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, 524288 -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vredmax.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -1107,9 +1107,9 @@ define signext i32 @vreduce_umin_nxv2i32( %v) { ; CHECK-LABEL: vreduce_umin_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, -1 -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vredminu.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -1124,9 
+1124,9 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: addi a0, a0, -1 -; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, a0 -; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV32-NEXT: vredmin.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: ret @@ -1135,9 +1135,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 524288 ; RV64-NEXT: addiw a0, a0, -1 -; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV64-NEXT: vredmin.vs v8, v8, v9 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -1150,9 +1150,9 @@ define signext i32 @vreduce_and_nxv2i32( %v) { ; CHECK-LABEL: vreduce_and_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v9, -1 -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vredand.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -1165,9 +1165,9 @@ define signext i32 @vreduce_or_nxv2i32( %v) { ; CHECK-LABEL: vreduce_or_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vredor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -1180,9 +1180,9 @@ define signext i32 @vreduce_xor_nxv2i32( %v) { ; CHECK-LABEL: vreduce_xor_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vredxor.vs v8, v8, v9 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -1195,9 +1195,9 @@ define signext i32 @vreduce_add_nxv4i32( %v) { ; CHECK-LABEL: vreduce_add_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v10, zero -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vredsum.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -1208,11 +1208,11 @@ define signext i32 @vwreduce_add_nxv4i16( %v) { ; CHECK-LABEL: vwreduce_add_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vwredsum.vs v8, v8, v9 -; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %e = sext %v to @@ -1223,11 +1223,11 @@ define signext i32 @vwreduce_uadd_nxv4i16( %v) { ; CHECK-LABEL: vwreduce_uadd_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, zero -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vwredsumu.vs v8, v8, v9 -; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma 
; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %e = zext %v to @@ -1240,9 +1240,9 @@ define signext i32 @vreduce_umax_nxv4i32( %v) { ; CHECK-LABEL: vreduce_umax_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v10, zero -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vredmaxu.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -1256,9 +1256,9 @@ ; CHECK-LABEL: vreduce_smax_nxv4i32: ; CHECK: # %bb.0: ; CHECK-NEXT: lui a0, 524288 -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vredmax.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -1271,9 +1271,9 @@ define signext i32 @vreduce_umin_nxv4i32( %v) { ; CHECK-LABEL: vreduce_umin_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v10, -1 -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vredminu.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -1288,9 +1288,9 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 524288 ; RV32-NEXT: addi a0, a0, -1 -; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV32-NEXT: vmv.s.x v10, a0 -; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; RV32-NEXT: vredmin.vs v8, v8, v10 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: ret @@ -1299,9 +1299,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 524288 ; RV64-NEXT: addiw a0, a0, -1 -; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, a0 -; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; RV64-NEXT: vredmin.vs v8, v8, v10 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -1314,9 +1314,9 @@ define signext i32 @vreduce_and_nxv4i32( %v) { ; CHECK-LABEL: vreduce_and_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v10, -1 -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vredand.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -1329,9 +1329,9 @@ define signext i32 @vreduce_or_nxv4i32( %v) { ; CHECK-LABEL: vreduce_or_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v10, zero -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vredor.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -1344,9 +1344,9 @@ define signext i32 @vreduce_xor_nxv4i32( %v) { ; CHECK-LABEL: vreduce_xor_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma ; CHECK-NEXT: vmv.s.x v10, zero -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vredxor.vs v8, v8, v10 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret @@ -1359,22 +1359,22 @@ define i64 @vreduce_add_nxv1i64( %v) { ; RV32-LABEL: vreduce_add_nxv1i64: ; 
RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, zero -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV32-NEXT: vredsum.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_add_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, zero -; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV64-NEXT: vredsum.vs v8, v8, v9 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -1385,25 +1385,25 @@ define i64 @vwreduce_add_nxv1i32( %v) { ; RV32-LABEL: vwreduce_add_nxv1i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, zero -; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; RV32-NEXT: vwredsum.vs v8, v8, v9 -; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vwreduce_add_nxv1i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, zero -; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; RV64-NEXT: vwredsum.vs v8, v8, v9 -; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %e = sext %v to @@ -1414,25 +1414,25 @@ define i64 @vwreduce_uadd_nxv1i32( %v) { ; RV32-LABEL: vwreduce_uadd_nxv1i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, zero -; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; RV32-NEXT: vwredsumu.vs v8, v8, v9 -; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vwreduce_uadd_nxv1i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, zero -; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; RV64-NEXT: vwredsumu.vs v8, v8, v9 -; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %e = zext %v to @@ -1445,22 +1445,22 @@ define i64 @vreduce_umax_nxv1i64( %v) { ; RV32-LABEL: vreduce_umax_nxv1i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, zero -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; 
RV32-NEXT: vredmaxu.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_umax_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, zero -; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV64-NEXT: vredmaxu.vs v8, v8, v9 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -1479,13 +1479,13 @@ ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw zero, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV32-NEXT: vredmax.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1495,9 +1495,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: li a0, -1 ; RV64-NEXT: slli a0, a0, 63 -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV64-NEXT: vredmax.vs v8, v8, v9 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -1510,22 +1510,22 @@ define i64 @vreduce_umin_nxv1i64( %v) { ; RV32-LABEL: vreduce_umin_nxv1i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.v.i v9, -1 -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV32-NEXT: vredminu.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_umin_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.v.i v9, -1 -; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV64-NEXT: vredminu.vs v8, v8, v9 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -1546,13 +1546,13 @@ ; RV32-NEXT: addi a0, a0, -1 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV32-NEXT: vredmin.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1562,9 +1562,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: li a0, -1 ; RV64-NEXT: srli a0, a0, 1 -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, a0 -; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV64-NEXT: vredmin.vs v8, v8, v9 ; RV64-NEXT: vmv.x.s a0, v8 ; 
RV64-NEXT: ret @@ -1577,22 +1577,22 @@ define i64 @vreduce_and_nxv1i64( %v) { ; RV32-LABEL: vreduce_and_nxv1i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.v.i v9, -1 -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV32-NEXT: vredand.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_and_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.v.i v9, -1 -; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV64-NEXT: vredand.vs v8, v8, v9 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -1605,22 +1605,22 @@ define i64 @vreduce_or_nxv1i64( %v) { ; RV32-LABEL: vreduce_or_nxv1i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, zero -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV32-NEXT: vredor.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_or_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, zero -; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV64-NEXT: vredor.vs v8, v8, v9 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -1633,22 +1633,22 @@ define i64 @vreduce_xor_nxv1i64( %v) { ; RV32-LABEL: vreduce_xor_nxv1i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, zero -; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV32-NEXT: vredxor.vs v8, v8, v9 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_xor_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, zero -; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; RV64-NEXT: vredxor.vs v8, v8, v9 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -1661,22 +1661,22 @@ define i64 @vreduce_add_nxv2i64( %v) { ; RV32-LABEL: vreduce_add_nxv2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.s.x v10, zero -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV32-NEXT: vredsum.vs v8, v8, v10 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_add_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, 
ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, zero -; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV64-NEXT: vredsum.vs v8, v8, v10 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -1687,25 +1687,25 @@ define i64 @vwreduce_add_nxv2i32( %v) { ; RV32-LABEL: vwreduce_add_nxv2i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, zero -; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV32-NEXT: vwredsum.vs v8, v8, v9 -; RV32-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vwreduce_add_nxv2i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, zero -; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV64-NEXT: vwredsum.vs v8, v8, v9 -; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %e = sext %v to @@ -1716,25 +1716,25 @@ define i64 @vwreduce_uadd_nxv2i32( %v) { ; RV32-LABEL: vwreduce_uadd_nxv2i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.s.x v9, zero -; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV32-NEXT: vwredsumu.vs v8, v8, v9 -; RV32-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vwreduce_uadd_nxv2i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v9, zero -; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; RV64-NEXT: vwredsumu.vs v8, v8, v9 -; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %e = zext %v to @@ -1747,22 +1747,22 @@ define i64 @vreduce_umax_nxv2i64( %v) { ; RV32-LABEL: vreduce_umax_nxv2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.s.x v10, zero -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV32-NEXT: vredmaxu.vs v8, v8, v10 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_umax_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, zero -; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV64-NEXT: vredmaxu.vs v8, v8, v10 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: 
ret @@ -1781,13 +1781,13 @@ ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw zero, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV32-NEXT: vredmax.vs v8, v8, v10 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1797,9 +1797,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: li a0, -1 ; RV64-NEXT: slli a0, a0, 63 -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, a0 -; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV64-NEXT: vredmax.vs v8, v8, v10 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -1812,22 +1812,22 @@ define i64 @vreduce_umin_nxv2i64( %v) { ; RV32-LABEL: vreduce_umin_nxv2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.v.i v10, -1 -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV32-NEXT: vredminu.vs v8, v8, v10 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_umin_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.v.i v10, -1 -; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV64-NEXT: vredminu.vs v8, v8, v10 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -1848,13 +1848,13 @@ ; RV32-NEXT: addi a0, a0, -1 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV32-NEXT: vredmin.vs v8, v8, v10 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1864,9 +1864,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: li a0, -1 ; RV64-NEXT: srli a0, a0, 1 -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, a0 -; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV64-NEXT: vredmin.vs v8, v8, v10 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -1879,22 +1879,22 @@ define i64 @vreduce_and_nxv2i64( %v) { ; RV32-LABEL: vreduce_and_nxv2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.v.i v10, -1 -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV32-NEXT: vredand.vs v8, v8, v10 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; 
RV32-NEXT: ret ; ; RV64-LABEL: vreduce_and_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.v.i v10, -1 -; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV64-NEXT: vredand.vs v8, v8, v10 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -1907,22 +1907,22 @@ define i64 @vreduce_or_nxv2i64( %v) { ; RV32-LABEL: vreduce_or_nxv2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.s.x v10, zero -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV32-NEXT: vredor.vs v8, v8, v10 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_or_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, zero -; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV64-NEXT: vredor.vs v8, v8, v10 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -1935,22 +1935,22 @@ define i64 @vreduce_xor_nxv2i64( %v) { ; RV32-LABEL: vreduce_xor_nxv2i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.s.x v10, zero -; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV32-NEXT: vredxor.vs v8, v8, v10 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_xor_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, zero -; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; RV64-NEXT: vredxor.vs v8, v8, v10 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -1963,22 +1963,22 @@ define i64 @vreduce_add_nxv4i64( %v) { ; RV32-LABEL: vreduce_add_nxv4i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.s.x v12, zero -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV32-NEXT: vredsum.vs v8, v8, v12 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_add_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v12, zero -; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV64-NEXT: vredsum.vs v8, v8, v12 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -1989,25 +1989,25 @@ define i64 @vwreduce_add_nxv4i32( %v) { ; RV32-LABEL: vwreduce_add_nxv4i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.s.x v10, zero -; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; RV32-NEXT: vsetvli a0, 
zero, e32, m2, ta, ma ; RV32-NEXT: vwredsum.vs v8, v8, v10 -; RV32-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vwreduce_add_nxv4i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, zero -; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; RV64-NEXT: vwredsum.vs v8, v8, v10 -; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %e = sext %v to @@ -2018,25 +2018,25 @@ define i64 @vwreduce_uadd_nxv4i32( %v) { ; RV32-LABEL: vwreduce_uadd_nxv4i32: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.s.x v10, zero -; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; RV32-NEXT: vwredsumu.vs v8, v8, v10 -; RV32-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vwreduce_uadd_nxv4i32: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v10, zero -; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; RV64-NEXT: vwredsumu.vs v8, v8, v10 -; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret %e = zext %v to @@ -2049,22 +2049,22 @@ define i64 @vreduce_umax_nxv4i64( %v) { ; RV32-LABEL: vreduce_umax_nxv4i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.s.x v12, zero -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV32-NEXT: vredmaxu.vs v8, v8, v12 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_umax_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v12, zero -; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV64-NEXT: vredmaxu.vs v8, v8, v12 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -2083,13 +2083,13 @@ ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: sw zero, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV32-NEXT: vredmax.vs v8, v8, v12 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: 
vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -2099,9 +2099,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: li a0, -1 ; RV64-NEXT: slli a0, a0, 63 -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v12, a0 -; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV64-NEXT: vredmax.vs v8, v8, v12 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -2114,22 +2114,22 @@ define i64 @vreduce_umin_nxv4i64( %v) { ; RV32-LABEL: vreduce_umin_nxv4i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.v.i v12, -1 -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV32-NEXT: vredminu.vs v8, v8, v12 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_umin_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.v.i v12, -1 -; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV64-NEXT: vredminu.vs v8, v8, v12 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -2150,13 +2150,13 @@ ; RV32-NEXT: addi a0, a0, -1 ; RV32-NEXT: sw a0, 12(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV32-NEXT: vredmin.vs v8, v8, v12 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -2166,9 +2166,9 @@ ; RV64: # %bb.0: ; RV64-NEXT: li a0, -1 ; RV64-NEXT: srli a0, a0, 1 -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v12, a0 -; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV64-NEXT: vredmin.vs v8, v8, v12 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -2181,22 +2181,22 @@ define i64 @vreduce_and_nxv4i64( %v) { ; RV32-LABEL: vreduce_and_nxv4i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.v.i v12, -1 -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV32-NEXT: vredand.vs v8, v8, v12 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_and_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.v.i v12, -1 -; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV64-NEXT: vredand.vs v8, v8, v12 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -2209,22 +2209,22 @@ define i64 @vreduce_or_nxv4i64( %v) { ; RV32-LABEL: vreduce_or_nxv4i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, 
ma ; RV32-NEXT: vmv.s.x v12, zero -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV32-NEXT: vredor.vs v8, v8, v12 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_or_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v12, zero -; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV64-NEXT: vredor.vs v8, v8, v12 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret @@ -2237,22 +2237,22 @@ define i64 @vreduce_xor_nxv4i64( %v) { ; RV32-LABEL: vreduce_xor_nxv4i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vmv.s.x v12, zero -; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV32-NEXT: vredxor.vs v8, v8, v12 ; RV32-NEXT: vmv.x.s a0, v8 ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a1 ; RV32-NEXT: vmv.x.s a1, v8 ; RV32-NEXT: ret ; ; RV64-LABEL: vreduce_xor_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; RV64-NEXT: vmv.s.x v12, zero -; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; RV64-NEXT: vredxor.vs v8, v8, v12 ; RV64-NEXT: vmv.x.s a0, v8 ; RV64-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll @@ -7,7 +7,7 @@ define signext i1 @vpreduce_and_nxv1i1(i1 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -25,7 +25,7 @@ ; CHECK-LABEL: vpreduce_or_nxv1i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -43,7 +43,7 @@ ; CHECK-LABEL: vpreduce_xor_nxv1i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: xor a0, a1, a0 @@ -59,7 +59,7 @@ define signext i1 @vpreduce_and_nxv2i1(i1 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -77,7 +77,7 @@ ; CHECK-LABEL: vpreduce_or_nxv2i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -95,7 +95,7 @@ ; CHECK-LABEL: vpreduce_xor_nxv2i1: ; CHECK: # 
%bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: xor a0, a1, a0 @@ -111,7 +111,7 @@ define signext i1 @vpreduce_and_nxv4i1(i1 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -129,7 +129,7 @@ ; CHECK-LABEL: vpreduce_or_nxv4i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -147,7 +147,7 @@ ; CHECK-LABEL: vpreduce_xor_nxv4i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: xor a0, a1, a0 @@ -163,7 +163,7 @@ define signext i1 @vpreduce_and_nxv8i1(i1 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -181,7 +181,7 @@ ; CHECK-LABEL: vpreduce_or_nxv8i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -199,7 +199,7 @@ ; CHECK-LABEL: vpreduce_xor_nxv8i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: xor a0, a1, a0 @@ -215,7 +215,7 @@ define signext i1 @vpreduce_and_nxv16i1(i1 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -233,7 +233,7 @@ ; CHECK-LABEL: vpreduce_or_nxv16i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -251,7 +251,7 @@ ; CHECK-LABEL: vpreduce_xor_nxv16i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: xor a0, a1, a0 @@ -267,7 +267,7 @@ define signext i1 @vpreduce_and_nxv32i1(i1 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -285,7 +285,7 @@ ; CHECK-LABEL: vpreduce_or_nxv32i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, 
ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -303,7 +303,7 @@ ; CHECK-LABEL: vpreduce_xor_nxv32i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: xor a0, a1, a0 @@ -320,7 +320,7 @@ ; CHECK-LABEL: vpreduce_or_nxv40i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -337,7 +337,7 @@ define signext i1 @vpreduce_and_nxv64i1(i1 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_and_nxv64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -355,7 +355,7 @@ ; CHECK-LABEL: vpreduce_or_nxv64i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -373,7 +373,7 @@ ; CHECK-LABEL: vpreduce_xor_nxv64i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: xor a0, a1, a0 @@ -398,7 +398,7 @@ ; CHECK-NEXT: mv a3, a2 ; CHECK-NEXT: .LBB22_2: ; CHECK-NEXT: li a4, 0 -; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vcpop.m a3, v11, v0.t ; CHECK-NEXT: snez a3, a3 @@ -408,7 +408,7 @@ ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: mv a4, a2 ; CHECK-NEXT: .LBB22_4: -; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v10 ; CHECK-NEXT: vcpop.m a1, v8, v0.t ; CHECK-NEXT: snez a1, a1 @@ -426,7 +426,7 @@ ; CHECK-LABEL: vpreduce_add_nxv1i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: xor a0, a1, a0 @@ -443,7 +443,7 @@ ; CHECK-LABEL: vpreduce_add_nxv2i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: xor a0, a1, a0 @@ -460,7 +460,7 @@ ; CHECK-LABEL: vpreduce_add_nxv4i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: xor a0, a1, a0 @@ -477,7 +477,7 @@ ; CHECK-LABEL: vpreduce_add_nxv8i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: xor a0, a1, a0 @@ -494,7 +494,7 @@ ; CHECK-LABEL: vpreduce_add_nxv16i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: 
vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: xor a0, a1, a0 @@ -511,7 +511,7 @@ ; CHECK-LABEL: vpreduce_add_nxv32i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: xor a0, a1, a0 @@ -528,7 +528,7 @@ ; CHECK-LABEL: vpreduce_add_nxv64i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: xor a0, a1, a0 @@ -545,7 +545,7 @@ define signext i1 @vpreduce_smax_nxv1i1(i1 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -562,7 +562,7 @@ define signext i1 @vpreduce_smax_nxv2i1(i1 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -579,7 +579,7 @@ define signext i1 @vpreduce_smax_nxv4i1(i1 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -596,7 +596,7 @@ define signext i1 @vpreduce_smax_nxv8i1(i1 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -613,7 +613,7 @@ define signext i1 @vpreduce_smax_nxv16i1(i1 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -630,7 +630,7 @@ define signext i1 @vpreduce_smax_nxv32i1(i1 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -647,7 +647,7 @@ define signext i1 @vpreduce_smax_nxv64i1(i1 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_smax_nxv64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -665,7 +665,7 @@ ; CHECK-LABEL: vpreduce_smin_nxv1i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -683,7 +683,7 @@ ; CHECK-LABEL: vpreduce_smin_nxv2i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; 
CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -701,7 +701,7 @@ ; CHECK-LABEL: vpreduce_smin_nxv4i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -719,7 +719,7 @@ ; CHECK-LABEL: vpreduce_smin_nxv8i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -737,7 +737,7 @@ ; CHECK-LABEL: vpreduce_smin_nxv16i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -755,7 +755,7 @@ ; CHECK-LABEL: vpreduce_smin_nxv32i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -773,7 +773,7 @@ ; CHECK-LABEL: vpreduce_smin_nxv64i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -791,7 +791,7 @@ ; CHECK-LABEL: vpreduce_umax_nxv1i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -809,7 +809,7 @@ ; CHECK-LABEL: vpreduce_umax_nxv2i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -827,7 +827,7 @@ ; CHECK-LABEL: vpreduce_umax_nxv4i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -845,7 +845,7 @@ ; CHECK-LABEL: vpreduce_umax_nxv8i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -863,7 +863,7 @@ ; CHECK-LABEL: vpreduce_umax_nxv16i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -881,7 +881,7 @@ ; CHECK-LABEL: vpreduce_umax_nxv32i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -899,7 +899,7 @@ ; CHECK-LABEL: vpreduce_umax_nxv64i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v0 -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: 
vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: snez a1, a1 @@ -916,7 +916,7 @@ define signext i1 @vpreduce_umin_nxv1i1(i1 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -933,7 +933,7 @@ define signext i1 @vpreduce_umin_nxv2i1(i1 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -950,7 +950,7 @@ define signext i1 @vpreduce_umin_nxv4i1(i1 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -967,7 +967,7 @@ define signext i1 @vpreduce_umin_nxv8i1(i1 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -984,7 +984,7 @@ define signext i1 @vpreduce_umin_nxv16i1(i1 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -1001,7 +1001,7 @@ define signext i1 @vpreduce_umin_nxv32i1(i1 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -1018,7 +1018,7 @@ define signext i1 @vpreduce_umin_nxv64i1(i1 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_umin_nxv64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -1035,7 +1035,7 @@ define signext i1 @vpreduce_mul_nxv1i1(i1 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -1052,7 +1052,7 @@ define signext i1 @vpreduce_mul_nxv2i1(i1 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -1069,7 +1069,7 @@ define signext i1 @vpreduce_mul_nxv4i1(i1 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m 
a1, v9, v0.t @@ -1086,7 +1086,7 @@ define signext i1 @vpreduce_mul_nxv8i1(i1 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -1103,7 +1103,7 @@ define signext i1 @vpreduce_mul_nxv16i1(i1 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -1120,7 +1120,7 @@ define signext i1 @vpreduce_mul_nxv32i1(i1 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_nxv32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t @@ -1137,7 +1137,7 @@ define signext i1 @vpreduce_mul_nxv64i1(i1 signext %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_mul_nxv64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll @@ -7,7 +7,7 @@ define signext i1 @vreduce_or_nxv1i1( %v) { ; CHECK-LABEL: vreduce_or_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: addi a0, a0, -1 @@ -21,7 +21,7 @@ define signext i1 @vreduce_xor_nxv1i1( %v) { ; CHECK-LABEL: vreduce_xor_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 @@ -35,7 +35,7 @@ define signext i1 @vreduce_and_nxv1i1( %v) { ; CHECK-LABEL: vreduce_and_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: snez a0, a0 @@ -50,7 +50,7 @@ define signext i1 @vreduce_umax_nxv1i1( %v) { ; CHECK-LABEL: vreduce_umax_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: addi a0, a0, -1 @@ -64,7 +64,7 @@ define signext i1 @vreduce_smax_nxv1i1( %v) { ; CHECK-LABEL: vreduce_smax_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: snez a0, a0 @@ -79,7 +79,7 @@ define signext i1 @vreduce_umin_nxv1i1( %v) { ; CHECK-LABEL: vreduce_umin_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: snez a0, a0 @@ -94,7 +94,7 @@ define signext i1 @vreduce_smin_nxv1i1( %v) { ; CHECK-LABEL: vreduce_smin_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: 
vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: addi a0, a0, -1 @@ -108,7 +108,7 @@ define signext i1 @vreduce_or_nxv2i1( %v) { ; CHECK-LABEL: vreduce_or_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: addi a0, a0, -1 @@ -122,7 +122,7 @@ define signext i1 @vreduce_xor_nxv2i1( %v) { ; CHECK-LABEL: vreduce_xor_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 @@ -136,7 +136,7 @@ define signext i1 @vreduce_and_nxv2i1( %v) { ; CHECK-LABEL: vreduce_and_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: snez a0, a0 @@ -151,7 +151,7 @@ define signext i1 @vreduce_umax_nxv2i1( %v) { ; CHECK-LABEL: vreduce_umax_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: addi a0, a0, -1 @@ -165,7 +165,7 @@ define signext i1 @vreduce_smax_nxv2i1( %v) { ; CHECK-LABEL: vreduce_smax_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: snez a0, a0 @@ -180,7 +180,7 @@ define signext i1 @vreduce_umin_nxv2i1( %v) { ; CHECK-LABEL: vreduce_umin_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: snez a0, a0 @@ -195,7 +195,7 @@ define signext i1 @vreduce_smin_nxv2i1( %v) { ; CHECK-LABEL: vreduce_smin_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: addi a0, a0, -1 @@ -209,7 +209,7 @@ define signext i1 @vreduce_or_nxv4i1( %v) { ; CHECK-LABEL: vreduce_or_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: addi a0, a0, -1 @@ -223,7 +223,7 @@ define signext i1 @vreduce_xor_nxv4i1( %v) { ; CHECK-LABEL: vreduce_xor_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 @@ -237,7 +237,7 @@ define signext i1 @vreduce_and_nxv4i1( %v) { ; CHECK-LABEL: vreduce_and_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: snez a0, a0 @@ -252,7 +252,7 @@ define signext i1 @vreduce_umax_nxv4i1( %v) { ; CHECK-LABEL: vreduce_umax_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: addi a0, a0, -1 @@ -266,7 +266,7 @@ define signext i1 @vreduce_smax_nxv4i1( %v) { ; 
CHECK-LABEL: vreduce_smax_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: snez a0, a0 @@ -281,7 +281,7 @@ define signext i1 @vreduce_umin_nxv4i1( %v) { ; CHECK-LABEL: vreduce_umin_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: snez a0, a0 @@ -296,7 +296,7 @@ define signext i1 @vreduce_smin_nxv4i1( %v) { ; CHECK-LABEL: vreduce_smin_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: addi a0, a0, -1 @@ -310,7 +310,7 @@ define signext i1 @vreduce_or_nxv8i1( %v) { ; CHECK-LABEL: vreduce_or_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: addi a0, a0, -1 @@ -324,7 +324,7 @@ define signext i1 @vreduce_xor_nxv8i1( %v) { ; CHECK-LABEL: vreduce_xor_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 @@ -338,7 +338,7 @@ define signext i1 @vreduce_and_nxv8i1( %v) { ; CHECK-LABEL: vreduce_and_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: snez a0, a0 @@ -353,7 +353,7 @@ define signext i1 @vreduce_umax_nxv8i1( %v) { ; CHECK-LABEL: vreduce_umax_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: addi a0, a0, -1 @@ -367,7 +367,7 @@ define signext i1 @vreduce_smax_nxv8i1( %v) { ; CHECK-LABEL: vreduce_smax_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: snez a0, a0 @@ -382,7 +382,7 @@ define signext i1 @vreduce_umin_nxv8i1( %v) { ; CHECK-LABEL: vreduce_umin_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: snez a0, a0 @@ -397,7 +397,7 @@ define signext i1 @vreduce_smin_nxv8i1( %v) { ; CHECK-LABEL: vreduce_smin_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: addi a0, a0, -1 @@ -411,7 +411,7 @@ define signext i1 @vreduce_or_nxv16i1( %v) { ; CHECK-LABEL: vreduce_or_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: addi a0, a0, -1 @@ -425,7 +425,7 @@ define signext i1 @vreduce_xor_nxv16i1( %v) { ; CHECK-LABEL: vreduce_xor_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 @@ -439,7 +439,7 @@ 
define signext i1 @vreduce_and_nxv16i1( %v) { ; CHECK-LABEL: vreduce_and_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: snez a0, a0 @@ -454,7 +454,7 @@ define signext i1 @vreduce_umax_nxv16i1( %v) { ; CHECK-LABEL: vreduce_umax_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: addi a0, a0, -1 @@ -468,7 +468,7 @@ define signext i1 @vreduce_smax_nxv16i1( %v) { ; CHECK-LABEL: vreduce_smax_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: snez a0, a0 @@ -483,7 +483,7 @@ define signext i1 @vreduce_umin_nxv16i1( %v) { ; CHECK-LABEL: vreduce_umin_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: snez a0, a0 @@ -498,7 +498,7 @@ define signext i1 @vreduce_smin_nxv16i1( %v) { ; CHECK-LABEL: vreduce_smin_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: addi a0, a0, -1 @@ -512,7 +512,7 @@ define signext i1 @vreduce_or_nxv32i1( %v) { ; CHECK-LABEL: vreduce_or_nxv32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: addi a0, a0, -1 @@ -526,7 +526,7 @@ define signext i1 @vreduce_xor_nxv32i1( %v) { ; CHECK-LABEL: vreduce_xor_nxv32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 @@ -540,7 +540,7 @@ define signext i1 @vreduce_and_nxv32i1( %v) { ; CHECK-LABEL: vreduce_and_nxv32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: snez a0, a0 @@ -555,7 +555,7 @@ define signext i1 @vreduce_umax_nxv32i1( %v) { ; CHECK-LABEL: vreduce_umax_nxv32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: addi a0, a0, -1 @@ -569,7 +569,7 @@ define signext i1 @vreduce_smax_nxv32i1( %v) { ; CHECK-LABEL: vreduce_smax_nxv32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: snez a0, a0 @@ -584,7 +584,7 @@ define signext i1 @vreduce_umin_nxv32i1( %v) { ; CHECK-LABEL: vreduce_umin_nxv32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: snez a0, a0 @@ -599,7 +599,7 @@ define signext i1 @vreduce_smin_nxv32i1( %v) { ; CHECK-LABEL: vreduce_smin_nxv32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; 
CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: addi a0, a0, -1 @@ -613,7 +613,7 @@ define signext i1 @vreduce_or_nxv64i1( %v) { ; CHECK-LABEL: vreduce_or_nxv64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: addi a0, a0, -1 @@ -627,7 +627,7 @@ define signext i1 @vreduce_xor_nxv64i1( %v) { ; CHECK-LABEL: vreduce_xor_nxv64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 @@ -641,7 +641,7 @@ define signext i1 @vreduce_and_nxv64i1( %v) { ; CHECK-LABEL: vreduce_and_nxv64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: snez a0, a0 @@ -656,7 +656,7 @@ define signext i1 @vreduce_umax_nxv64i1( %v) { ; CHECK-LABEL: vreduce_umax_nxv64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: addi a0, a0, -1 @@ -670,7 +670,7 @@ define signext i1 @vreduce_smax_nxv64i1( %v) { ; CHECK-LABEL: vreduce_smax_nxv64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: snez a0, a0 @@ -685,7 +685,7 @@ define signext i1 @vreduce_umin_nxv64i1( %v) { ; CHECK-LABEL: vreduce_umin_nxv64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: snez a0, a0 @@ -700,7 +700,7 @@ define signext i1 @vreduce_smin_nxv64i1( %v) { ; CHECK-LABEL: vreduce_smin_nxv64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: addi a0, a0, -1 @@ -714,7 +714,7 @@ define signext i1 @vreduce_add_nxv1i1( %v) { ; CHECK-LABEL: vreduce_add_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 @@ -728,7 +728,7 @@ define signext i1 @vreduce_add_nxv2i1( %v) { ; CHECK-LABEL: vreduce_add_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 @@ -742,7 +742,7 @@ define signext i1 @vreduce_add_nxv4i1( %v) { ; CHECK-LABEL: vreduce_add_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 @@ -756,7 +756,7 @@ define signext i1 @vreduce_add_nxv8i1( %v) { ; CHECK-LABEL: vreduce_add_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 @@ -770,7 +770,7 @@ define signext i1 @vreduce_add_nxv16i1( %v) { ; CHECK-LABEL: vreduce_add_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, 
zero, e8, m2, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 @@ -784,7 +784,7 @@ define signext i1 @vreduce_add_nxv32i1( %v) { ; CHECK-LABEL: vreduce_add_nxv32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 @@ -798,7 +798,7 @@ define signext i1 @vreduce_add_nxv64i1( %v) { ; CHECK-LABEL: vreduce_add_nxv64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vcpop.m a0, v0 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: neg a0, a0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -33,7 +33,7 @@ define @intrinsic_vredxor_mask_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -56,7 +56,7 @@ define @intrinsic_vredxor_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -79,7 +79,7 @@ define @intrinsic_vredxor_mask_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -102,7 +102,7 @@ define @intrinsic_vredxor_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -125,7 +125,7 @@ define @intrinsic_vredxor_mask_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -148,7 +148,7 @@ define @intrinsic_vredxor_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -171,7 +171,7 @@ define @intrinsic_vredxor_mask_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vredxor_mask_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -194,7 +194,7 @@ define @intrinsic_vredxor_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vredxor.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vredxor_mask_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vredxor.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -240,7 +240,7 @@ define @intrinsic_vredxor_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vredxor.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -263,7 +263,7 @@ define @intrinsic_vredxor_mask_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vredxor.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -286,7 +286,7 @@ define @intrinsic_vredxor_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -309,7 +309,7 @@ define @intrinsic_vredxor_mask_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -332,7 +332,7 @@ define @intrinsic_vredxor_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -355,7 +355,7 @@ define @intrinsic_vredxor_mask_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -378,7 +378,7 @@ define @intrinsic_vredxor_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -401,7 +401,7 @@ define 
@intrinsic_vredxor_mask_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -424,7 +424,7 @@ define @intrinsic_vredxor_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vredxor.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -447,7 +447,7 @@ define @intrinsic_vredxor_mask_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vredxor.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -470,7 +470,7 @@ define @intrinsic_vredxor_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vredxor.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -493,7 +493,7 @@ define @intrinsic_vredxor_mask_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vredxor.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +516,7 @@ define @intrinsic_vredxor_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vredxor.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -539,7 +539,7 @@ define @intrinsic_vredxor_mask_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vredxor.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -562,7 +562,7 @@ define @intrinsic_vredxor_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -585,7 +585,7 @@ define @intrinsic_vredxor_mask_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -608,7 +608,7 @@ define @intrinsic_vredxor_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; 
CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -631,7 +631,7 @@ define @intrinsic_vredxor_mask_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -654,7 +654,7 @@ define @intrinsic_vredxor_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vredxor.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -677,7 +677,7 @@ define @intrinsic_vredxor_mask_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vredxor.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -700,7 +700,7 @@ define @intrinsic_vredxor_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vredxor.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -723,7 +723,7 @@ define @intrinsic_vredxor_mask_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vredxor.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +746,7 @@ define @intrinsic_vredxor_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vredxor.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -769,7 +769,7 @@ define @intrinsic_vredxor_mask_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vredxor.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -792,7 +792,7 @@ define @intrinsic_vredxor_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -815,7 +815,7 @@ define @intrinsic_vredxor_mask_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -838,7 +838,7 @@ define @intrinsic_vredxor_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vredxor.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -861,7 +861,7 @@ define @intrinsic_vredxor_mask_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv2i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma ; CHECK-NEXT: vredxor.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -884,7 +884,7 @@ define @intrinsic_vredxor_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vredxor.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -907,7 +907,7 @@ define @intrinsic_vredxor_mask_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv4i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma ; CHECK-NEXT: vredxor.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -930,7 +930,7 @@ define @intrinsic_vredxor_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vredxor.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vredxor_mask_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv8i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma ; CHECK-NEXT: vredxor.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -33,7 +33,7 @@ define @intrinsic_vredxor_mask_vs_nxv8i8_nxv1i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv1i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -56,7 +56,7 @@ define @intrinsic_vredxor_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -79,7 +79,7 @@ define @intrinsic_vredxor_mask_vs_nxv8i8_nxv2i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv2i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vredxor.vs v8, 
v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -102,7 +102,7 @@ define @intrinsic_vredxor_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -125,7 +125,7 @@ define @intrinsic_vredxor_mask_vs_nxv8i8_nxv4i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv4i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -148,7 +148,7 @@ define @intrinsic_vredxor_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -171,7 +171,7 @@ define @intrinsic_vredxor_mask_vs_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -194,7 +194,7 @@ define @intrinsic_vredxor_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vredxor.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vredxor_mask_vs_nxv8i8_nxv16i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv16i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vredxor.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -240,7 +240,7 @@ define @intrinsic_vredxor_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vredxor.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -263,7 +263,7 @@ define @intrinsic_vredxor_mask_vs_nxv8i8_nxv32i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv32i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vredxor.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -286,7 +286,7 @@ define @intrinsic_vredxor_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -309,7 +309,7 @@ define @intrinsic_vredxor_mask_vs_nxv4i16_nxv1i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv1i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; 
CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -332,7 +332,7 @@ define @intrinsic_vredxor_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -355,7 +355,7 @@ define @intrinsic_vredxor_mask_vs_nxv4i16_nxv2i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv2i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -378,7 +378,7 @@ define @intrinsic_vredxor_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -401,7 +401,7 @@ define @intrinsic_vredxor_mask_vs_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -424,7 +424,7 @@ define @intrinsic_vredxor_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vredxor.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -447,7 +447,7 @@ define @intrinsic_vredxor_mask_vs_nxv4i16_nxv8i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv8i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vredxor.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -470,7 +470,7 @@ define @intrinsic_vredxor_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vredxor.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -493,7 +493,7 @@ define @intrinsic_vredxor_mask_vs_nxv4i16_nxv16i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv16i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vredxor.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +516,7 @@ define @intrinsic_vredxor_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vredxor.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -539,7 +539,7 @@ define @intrinsic_vredxor_mask_vs_nxv4i16_nxv32i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv32i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vredxor.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -562,7 +562,7 @@ define @intrinsic_vredxor_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -585,7 +585,7 @@ define @intrinsic_vredxor_mask_vs_nxv2i32_nxv1i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv1i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -608,7 +608,7 @@ define @intrinsic_vredxor_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -631,7 +631,7 @@ define @intrinsic_vredxor_mask_vs_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -654,7 +654,7 @@ define @intrinsic_vredxor_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vredxor.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -677,7 +677,7 @@ define @intrinsic_vredxor_mask_vs_nxv2i32_nxv4i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv4i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vredxor.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -700,7 +700,7 @@ define @intrinsic_vredxor_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vredxor.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -723,7 +723,7 @@ define @intrinsic_vredxor_mask_vs_nxv2i32_nxv8i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv8i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vredxor.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +746,7 @@ define @intrinsic_vredxor_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv16i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vredxor.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -769,7 +769,7 @@ define @intrinsic_vredxor_mask_vs_nxv2i32_nxv16i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; 
CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv16i32_nxv2i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
 ; CHECK-NEXT: vredxor.vs v8, v16, v9, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -792,7 +792,7 @@
 define @intrinsic_vredxor_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
 ; CHECK-NEXT: vredxor.vs v8, v9, v10
 ; CHECK-NEXT: ret
 entry:
@@ -815,7 +815,7 @@
 define @intrinsic_vredxor_mask_vs_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
 ; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -838,7 +838,7 @@
 define @intrinsic_vredxor_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma
 ; CHECK-NEXT: vredxor.vs v8, v10, v9
 ; CHECK-NEXT: ret
 entry:
@@ -861,7 +861,7 @@
 define @intrinsic_vredxor_mask_vs_nxv1i64_nxv2i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv2i64_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma
 ; CHECK-NEXT: vredxor.vs v8, v10, v9, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -884,7 +884,7 @@
 define @intrinsic_vredxor_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
 ; CHECK-NEXT: vredxor.vs v8, v12, v9
 ; CHECK-NEXT: ret
 entry:
@@ -907,7 +907,7 @@
 define @intrinsic_vredxor_mask_vs_nxv1i64_nxv4i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv4i64_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
 ; CHECK-NEXT: vredxor.vs v8, v12, v9, v0.t
 ; CHECK-NEXT: ret
 entry:
@@ -930,7 +930,7 @@
 define @intrinsic_vredxor_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, i64 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
 ; CHECK-NEXT: vredxor.vs v8, v16, v9
 ; CHECK-NEXT: ret
 entry:
@@ -953,7 +953,7 @@
 define @intrinsic_vredxor_mask_vs_nxv1i64_nxv8i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv8i64_nxv1i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
 ; CHECK-NEXT: vredxor.vs v8, v16, v9, v0.t
 ; CHECK-NEXT: ret
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll
@@ -10,7 +10,7 @@
 define @intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli
zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vrem_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vrem_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vrem_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vrem_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vrem_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vrem_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vrem_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vrem_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vrem_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vrem_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define 
@intrinsic_vrem_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vrem_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vrem_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vrem_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vrem_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vrem_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vrem_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vrem_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vrem_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vrem_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vrem_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv8i64_nxv8i64_nxv8i64: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vrem_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1095,7 +1095,7 @@ define @intrinsic_vrem_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vrem_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vrem_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vrem_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define @intrinsic_vrem_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vrem_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vrem_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vrem_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vrem_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define 
@intrinsic_vrem_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vrem_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vrem_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vrem_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vrem_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ define @intrinsic_vrem_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ define @intrinsic_vrem_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vrem_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1898,7 +1898,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1957,7 +1957,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vrem.vv v8, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -2016,7 +2016,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: 
vrem.vv v8, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -2075,7 +2075,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vrem.vv v8, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vrem_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vrem_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vrem_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vrem_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vrem_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vrem_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vrem_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vrem_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; 
CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vrem_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vrem_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vrem_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vrem_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vrem_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vrem_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vrem_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vrem_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vrem_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vrem_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vrem_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vrem_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vrem_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vrem_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vrem_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1095,7 +1095,7 @@ define @intrinsic_vrem_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vrem_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vrem_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vrem_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define @intrinsic_vrem_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vrem_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vrem_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret 
entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vrem_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vrem_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define @intrinsic_vrem_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vrem_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vrem_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vrem_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vrem_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ define @intrinsic_vrem_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ define @intrinsic_vrem_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vrem_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1894,7 +1894,7 @@ define @intrinsic_vrem_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv1i64_nxv1i64_i64: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1941,7 +1941,7 @@ define @intrinsic_vrem_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1988,7 +1988,7 @@ define @intrinsic_vrem_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2035,7 +2035,7 @@ define @intrinsic_vrem_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrem_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll @@ -7,7 +7,7 @@ define @vrem_vv_nxv1i8( %va, %vb) { ; CHECK-LABEL: vrem_vv_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = srem %va, %vb @@ -17,7 +17,7 @@ define @vrem_vx_nxv1i8( %va, i8 signext %b) { ; CHECK-LABEL: vrem_vx_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -30,7 +30,7 @@ ; CHECK-LABEL: vrem_vi_nxv1i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 109 -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmulh.vx v9, v8, a0 ; CHECK-NEXT: vsub.vv v9, v9, v8 ; CHECK-NEXT: vsra.vi v9, v9, 2 @@ -48,7 +48,7 @@ define @vrem_vv_nxv2i8( %va, %vb) { ; CHECK-LABEL: vrem_vv_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = srem %va, %vb @@ -58,7 +58,7 @@ define @vrem_vx_nxv2i8( %va, i8 signext %b) { ; CHECK-LABEL: vrem_vx_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -71,7 +71,7 @@ ; CHECK-LABEL: vrem_vi_nxv2i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 109 -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmulh.vx v9, v8, a0 ; CHECK-NEXT: vsub.vv v9, v9, v8 ; CHECK-NEXT: vsra.vi v9, v9, 2 @@ -89,7 +89,7 @@ define @vrem_vv_nxv4i8( %va, %vb) { ; CHECK-LABEL: vrem_vv_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = srem %va, %vb @@ -99,7 +99,7 @@ define @vrem_vx_nxv4i8( %va, i8 signext %b) { ; CHECK-LABEL: vrem_vx_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, 
ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -112,7 +112,7 @@ ; CHECK-LABEL: vrem_vi_nxv4i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 109 -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmulh.vx v9, v8, a0 ; CHECK-NEXT: vsub.vv v9, v9, v8 ; CHECK-NEXT: vsra.vi v9, v9, 2 @@ -130,7 +130,7 @@ define @vrem_vv_nxv8i8( %va, %vb) { ; CHECK-LABEL: vrem_vv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = srem %va, %vb @@ -140,7 +140,7 @@ define @vrem_vx_nxv8i8( %va, i8 signext %b) { ; CHECK-LABEL: vrem_vx_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -153,7 +153,7 @@ ; CHECK-LABEL: vrem_vi_nxv8i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 109 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmulh.vx v9, v8, a0 ; CHECK-NEXT: vsub.vv v9, v9, v8 ; CHECK-NEXT: vsra.vi v9, v9, 2 @@ -171,7 +171,7 @@ define @vrem_vv_nxv16i8( %va, %vb) { ; CHECK-LABEL: vrem_vv_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = srem %va, %vb @@ -181,7 +181,7 @@ define @vrem_vx_nxv16i8( %va, i8 signext %b) { ; CHECK-LABEL: vrem_vx_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -194,7 +194,7 @@ ; CHECK-LABEL: vrem_vi_nxv16i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 109 -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vmulh.vx v10, v8, a0 ; CHECK-NEXT: vsub.vv v10, v10, v8 ; CHECK-NEXT: vsra.vi v10, v10, 2 @@ -212,7 +212,7 @@ define @vrem_vv_nxv32i8( %va, %vb) { ; CHECK-LABEL: vrem_vv_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = srem %va, %vb @@ -222,7 +222,7 @@ define @vrem_vx_nxv32i8( %va, i8 signext %b) { ; CHECK-LABEL: vrem_vx_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -235,7 +235,7 @@ ; CHECK-LABEL: vrem_vi_nxv32i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 109 -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vmulh.vx v12, v8, a0 ; CHECK-NEXT: vsub.vv v12, v12, v8 ; CHECK-NEXT: vsra.vi v12, v12, 2 @@ -253,7 +253,7 @@ define @vrem_vv_nxv64i8( %va, %vb) { ; CHECK-LABEL: vrem_vv_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = srem %va, %vb @@ -263,7 +263,7 @@ define @vrem_vx_nxv64i8( %va, i8 signext %b) { ; CHECK-LABEL: vrem_vx_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, 
ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -276,7 +276,7 @@ ; CHECK-LABEL: vrem_vi_nxv64i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 109 -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vmulh.vx v16, v8, a0 ; CHECK-NEXT: vsub.vv v16, v16, v8 ; CHECK-NEXT: vsra.vi v16, v16, 2 @@ -294,7 +294,7 @@ define @vrem_vv_nxv1i16( %va, %vb) { ; CHECK-LABEL: vrem_vv_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = srem %va, %vb @@ -304,7 +304,7 @@ define @vrem_vx_nxv1i16( %va, i16 signext %b) { ; CHECK-LABEL: vrem_vx_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -318,7 +318,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 1048571 ; RV32-NEXT: addi a0, a0, 1755 -; RV32-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; RV32-NEXT: vmulh.vx v9, v8, a0 ; RV32-NEXT: vsra.vi v9, v9, 1 ; RV32-NEXT: vsrl.vi v10, v9, 15 @@ -331,7 +331,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 1048571 ; RV64-NEXT: addiw a0, a0, 1755 -; RV64-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; RV64-NEXT: vmulh.vx v9, v8, a0 ; RV64-NEXT: vsra.vi v9, v9, 1 ; RV64-NEXT: vsrl.vi v10, v9, 15 @@ -348,7 +348,7 @@ define @vrem_vv_nxv2i16( %va, %vb) { ; CHECK-LABEL: vrem_vv_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = srem %va, %vb @@ -358,7 +358,7 @@ define @vrem_vx_nxv2i16( %va, i16 signext %b) { ; CHECK-LABEL: vrem_vx_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -372,7 +372,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 1048571 ; RV32-NEXT: addi a0, a0, 1755 -; RV32-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; RV32-NEXT: vmulh.vx v9, v8, a0 ; RV32-NEXT: vsra.vi v9, v9, 1 ; RV32-NEXT: vsrl.vi v10, v9, 15 @@ -385,7 +385,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 1048571 ; RV64-NEXT: addiw a0, a0, 1755 -; RV64-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; RV64-NEXT: vmulh.vx v9, v8, a0 ; RV64-NEXT: vsra.vi v9, v9, 1 ; RV64-NEXT: vsrl.vi v10, v9, 15 @@ -402,7 +402,7 @@ define @vrem_vv_nxv4i16( %va, %vb) { ; CHECK-LABEL: vrem_vv_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = srem %va, %vb @@ -412,7 +412,7 @@ define @vrem_vx_nxv4i16( %va, i16 signext %b) { ; CHECK-LABEL: vrem_vx_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -426,7 +426,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 1048571 ; RV32-NEXT: addi a0, a0, 1755 -; RV32-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; RV32-NEXT: vmulh.vx v9, 
v8, a0 ; RV32-NEXT: vsra.vi v9, v9, 1 ; RV32-NEXT: vsrl.vi v10, v9, 15 @@ -439,7 +439,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 1048571 ; RV64-NEXT: addiw a0, a0, 1755 -; RV64-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; RV64-NEXT: vmulh.vx v9, v8, a0 ; RV64-NEXT: vsra.vi v9, v9, 1 ; RV64-NEXT: vsrl.vi v10, v9, 15 @@ -456,7 +456,7 @@ define @vrem_vv_nxv8i16( %va, %vb) { ; CHECK-LABEL: vrem_vv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = srem %va, %vb @@ -466,7 +466,7 @@ define @vrem_vx_nxv8i16( %va, i16 signext %b) { ; CHECK-LABEL: vrem_vx_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -480,7 +480,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 1048571 ; RV32-NEXT: addi a0, a0, 1755 -; RV32-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV32-NEXT: vmulh.vx v10, v8, a0 ; RV32-NEXT: vsra.vi v10, v10, 1 ; RV32-NEXT: vsrl.vi v12, v10, 15 @@ -493,7 +493,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 1048571 ; RV64-NEXT: addiw a0, a0, 1755 -; RV64-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV64-NEXT: vmulh.vx v10, v8, a0 ; RV64-NEXT: vsra.vi v10, v10, 1 ; RV64-NEXT: vsrl.vi v12, v10, 15 @@ -510,7 +510,7 @@ define @vrem_vv_nxv16i16( %va, %vb) { ; CHECK-LABEL: vrem_vv_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = srem %va, %vb @@ -520,7 +520,7 @@ define @vrem_vx_nxv16i16( %va, i16 signext %b) { ; CHECK-LABEL: vrem_vx_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -534,7 +534,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 1048571 ; RV32-NEXT: addi a0, a0, 1755 -; RV32-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; RV32-NEXT: vmulh.vx v12, v8, a0 ; RV32-NEXT: vsra.vi v12, v12, 1 ; RV32-NEXT: vsrl.vi v16, v12, 15 @@ -547,7 +547,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 1048571 ; RV64-NEXT: addiw a0, a0, 1755 -; RV64-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; RV64-NEXT: vmulh.vx v12, v8, a0 ; RV64-NEXT: vsra.vi v12, v12, 1 ; RV64-NEXT: vsrl.vi v16, v12, 15 @@ -564,7 +564,7 @@ define @vrem_vv_nxv32i16( %va, %vb) { ; CHECK-LABEL: vrem_vv_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = srem %va, %vb @@ -574,7 +574,7 @@ define @vrem_vx_nxv32i16( %va, i16 signext %b) { ; CHECK-LABEL: vrem_vx_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -588,7 +588,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 1048571 ; RV32-NEXT: addi a0, a0, 1755 -; RV32-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; RV32-NEXT: vmulh.vx v16, v8, a0 ; RV32-NEXT: 
vsra.vi v16, v16, 1 ; RV32-NEXT: vsrl.vi v24, v16, 15 @@ -601,7 +601,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 1048571 ; RV64-NEXT: addiw a0, a0, 1755 -; RV64-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; RV64-NEXT: vmulh.vx v16, v8, a0 ; RV64-NEXT: vsra.vi v16, v16, 1 ; RV64-NEXT: vsrl.vi v24, v16, 15 @@ -618,7 +618,7 @@ define @vrem_vv_nxv1i32( %va, %vb) { ; CHECK-LABEL: vrem_vv_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = srem %va, %vb @@ -628,7 +628,7 @@ define @vrem_vx_nxv1i32( %va, i32 signext %b) { ; CHECK-LABEL: vrem_vx_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -642,7 +642,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 449390 ; RV32-NEXT: addi a0, a0, -1171 -; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; RV32-NEXT: vmulh.vx v9, v8, a0 ; RV32-NEXT: vsub.vv v9, v9, v8 ; RV32-NEXT: vsrl.vi v10, v9, 31 @@ -656,7 +656,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 449390 ; RV64-NEXT: addiw a0, a0, -1171 -; RV64-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; RV64-NEXT: vmulh.vx v9, v8, a0 ; RV64-NEXT: vsub.vv v9, v9, v8 ; RV64-NEXT: vsra.vi v9, v9, 2 @@ -674,7 +674,7 @@ define @vrem_vv_nxv2i32( %va, %vb) { ; CHECK-LABEL: vrem_vv_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = srem %va, %vb @@ -684,7 +684,7 @@ define @vrem_vx_nxv2i32( %va, i32 signext %b) { ; CHECK-LABEL: vrem_vx_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -698,7 +698,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 449390 ; RV32-NEXT: addi a0, a0, -1171 -; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; RV32-NEXT: vmulh.vx v9, v8, a0 ; RV32-NEXT: vsub.vv v9, v9, v8 ; RV32-NEXT: vsrl.vi v10, v9, 31 @@ -712,7 +712,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 449390 ; RV64-NEXT: addiw a0, a0, -1171 -; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; RV64-NEXT: vmulh.vx v9, v8, a0 ; RV64-NEXT: vsub.vv v9, v9, v8 ; RV64-NEXT: vsra.vi v9, v9, 2 @@ -730,7 +730,7 @@ define @vrem_vv_nxv4i32( %va, %vb) { ; CHECK-LABEL: vrem_vv_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = srem %va, %vb @@ -740,7 +740,7 @@ define @vrem_vx_nxv4i32( %va, i32 signext %b) { ; CHECK-LABEL: vrem_vx_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -754,7 +754,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 449390 ; RV32-NEXT: addi a0, a0, -1171 -; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; RV32-NEXT: vmulh.vx v10, v8, a0 ; RV32-NEXT: vsub.vv v10, v10, v8 ; RV32-NEXT: 
vsrl.vi v12, v10, 31 @@ -768,7 +768,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 449390 ; RV64-NEXT: addiw a0, a0, -1171 -; RV64-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; RV64-NEXT: vmulh.vx v10, v8, a0 ; RV64-NEXT: vsub.vv v10, v10, v8 ; RV64-NEXT: vsra.vi v10, v10, 2 @@ -786,7 +786,7 @@ define @vrem_vv_nxv8i32( %va, %vb) { ; CHECK-LABEL: vrem_vv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = srem %va, %vb @@ -796,7 +796,7 @@ define @vrem_vx_nxv8i32( %va, i32 signext %b) { ; CHECK-LABEL: vrem_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -810,7 +810,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 449390 ; RV32-NEXT: addi a0, a0, -1171 -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vmulh.vx v12, v8, a0 ; RV32-NEXT: vsub.vv v12, v12, v8 ; RV32-NEXT: vsrl.vi v16, v12, 31 @@ -824,7 +824,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 449390 ; RV64-NEXT: addiw a0, a0, -1171 -; RV64-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV64-NEXT: vmulh.vx v12, v8, a0 ; RV64-NEXT: vsub.vv v12, v12, v8 ; RV64-NEXT: vsra.vi v12, v12, 2 @@ -842,7 +842,7 @@ define @vrem_vv_nxv16i32( %va, %vb) { ; CHECK-LABEL: vrem_vv_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = srem %va, %vb @@ -852,7 +852,7 @@ define @vrem_vx_nxv16i32( %va, i32 signext %b) { ; CHECK-LABEL: vrem_vx_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -866,7 +866,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 449390 ; RV32-NEXT: addi a0, a0, -1171 -; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; RV32-NEXT: vmulh.vx v16, v8, a0 ; RV32-NEXT: vsub.vv v16, v16, v8 ; RV32-NEXT: vsrl.vi v24, v16, 31 @@ -880,7 +880,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 449390 ; RV64-NEXT: addiw a0, a0, -1171 -; RV64-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; RV64-NEXT: vmulh.vx v16, v8, a0 ; RV64-NEXT: vsub.vv v16, v16, v8 ; RV64-NEXT: vsra.vi v16, v16, 2 @@ -898,7 +898,7 @@ define @vrem_vv_nxv1i64( %va, %vb) { ; CHECK-LABEL: vrem_vv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = srem %va, %vb @@ -913,7 +913,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vrem.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -921,7 +921,7 @@ ; ; RV64-LABEL: vrem_vx_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV64-NEXT: vrem.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -942,7 +942,7 @@ ; RV32-V-NEXT: addi a0, a0, 
1755 ; RV32-V-NEXT: sw a0, 8(sp) ; RV32-V-NEXT: addi a0, sp, 8 -; RV32-V-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-V-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-V-NEXT: vlse64.v v9, (a0), zero ; RV32-V-NEXT: vmulh.vv v9, v8, v9 ; RV32-V-NEXT: li a0, 63 @@ -957,7 +957,7 @@ ; ZVE64X-LABEL: vrem_vi_nxv1i64_0: ; ZVE64X: # %bb.0: ; ZVE64X-NEXT: li a0, -7 -; ZVE64X-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; ZVE64X-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; ZVE64X-NEXT: vrem.vx v8, v8, a0 ; ZVE64X-NEXT: ret ; @@ -965,7 +965,7 @@ ; RV64-V: # %bb.0: ; RV64-V-NEXT: lui a0, %hi(.LCPI56_0) ; RV64-V-NEXT: ld a0, %lo(.LCPI56_0)(a0) -; RV64-V-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-V-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV64-V-NEXT: vmulh.vx v9, v8, a0 ; RV64-V-NEXT: li a0, 63 ; RV64-V-NEXT: vsrl.vx v10, v9, a0 @@ -983,7 +983,7 @@ define @vrem_vv_nxv2i64( %va, %vb) { ; CHECK-LABEL: vrem_vv_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = srem %va, %vb @@ -998,7 +998,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vrem.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -1006,7 +1006,7 @@ ; ; RV64-LABEL: vrem_vx_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV64-NEXT: vrem.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -1027,7 +1027,7 @@ ; RV32-V-NEXT: addi a0, a0, 1755 ; RV32-V-NEXT: sw a0, 8(sp) ; RV32-V-NEXT: addi a0, sp, 8 -; RV32-V-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-V-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-V-NEXT: vlse64.v v10, (a0), zero ; RV32-V-NEXT: vmulh.vv v10, v8, v10 ; RV32-V-NEXT: li a0, 63 @@ -1042,7 +1042,7 @@ ; ZVE64X-LABEL: vrem_vi_nxv2i64_0: ; ZVE64X: # %bb.0: ; ZVE64X-NEXT: li a0, -7 -; ZVE64X-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; ZVE64X-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; ZVE64X-NEXT: vrem.vx v8, v8, a0 ; ZVE64X-NEXT: ret ; @@ -1050,7 +1050,7 @@ ; RV64-V: # %bb.0: ; RV64-V-NEXT: lui a0, %hi(.LCPI59_0) ; RV64-V-NEXT: ld a0, %lo(.LCPI59_0)(a0) -; RV64-V-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-V-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV64-V-NEXT: vmulh.vx v10, v8, a0 ; RV64-V-NEXT: li a0, 63 ; RV64-V-NEXT: vsrl.vx v12, v10, a0 @@ -1068,7 +1068,7 @@ define @vrem_vv_nxv4i64( %va, %vb) { ; CHECK-LABEL: vrem_vv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = srem %va, %vb @@ -1083,7 +1083,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vrem.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -1091,7 +1091,7 @@ ; ; RV64-LABEL: vrem_vx_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV64-NEXT: vrem.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -1112,7 +1112,7 @@ ; RV32-V-NEXT: addi a0, a0, 1755 ; RV32-V-NEXT: sw a0, 8(sp) ; RV32-V-NEXT: addi a0, sp, 8 -; RV32-V-NEXT: vsetvli a1, zero, e64, m4, ta, mu 
+; RV32-V-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-V-NEXT: vlse64.v v12, (a0), zero ; RV32-V-NEXT: vmulh.vv v12, v8, v12 ; RV32-V-NEXT: li a0, 63 @@ -1127,7 +1127,7 @@ ; ZVE64X-LABEL: vrem_vi_nxv4i64_0: ; ZVE64X: # %bb.0: ; ZVE64X-NEXT: li a0, -7 -; ZVE64X-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; ZVE64X-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; ZVE64X-NEXT: vrem.vx v8, v8, a0 ; ZVE64X-NEXT: ret ; @@ -1135,7 +1135,7 @@ ; RV64-V: # %bb.0: ; RV64-V-NEXT: lui a0, %hi(.LCPI62_0) ; RV64-V-NEXT: ld a0, %lo(.LCPI62_0)(a0) -; RV64-V-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-V-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV64-V-NEXT: vmulh.vx v12, v8, a0 ; RV64-V-NEXT: li a0, 63 ; RV64-V-NEXT: vsrl.vx v16, v12, a0 @@ -1153,7 +1153,7 @@ define @vrem_vv_nxv8i64( %va, %vb) { ; CHECK-LABEL: vrem_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = srem %va, %vb @@ -1168,7 +1168,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vrem.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -1176,7 +1176,7 @@ ; ; RV64-LABEL: vrem_vx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vrem.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -1197,7 +1197,7 @@ ; RV32-V-NEXT: addi a0, a0, 1755 ; RV32-V-NEXT: sw a0, 8(sp) ; RV32-V-NEXT: addi a0, sp, 8 -; RV32-V-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-V-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-V-NEXT: vlse64.v v16, (a0), zero ; RV32-V-NEXT: vmulh.vv v16, v8, v16 ; RV32-V-NEXT: li a0, 63 @@ -1212,7 +1212,7 @@ ; ZVE64X-LABEL: vrem_vi_nxv8i64_0: ; ZVE64X: # %bb.0: ; ZVE64X-NEXT: li a0, -7 -; ZVE64X-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; ZVE64X-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; ZVE64X-NEXT: vrem.vx v8, v8, a0 ; ZVE64X-NEXT: ret ; @@ -1220,7 +1220,7 @@ ; RV64-V: # %bb.0: ; RV64-V-NEXT: lui a0, %hi(.LCPI65_0) ; RV64-V-NEXT: ld a0, %lo(.LCPI65_0)(a0) -; RV64-V-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-V-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-V-NEXT: vmulh.vx v16, v8, a0 ; RV64-V-NEXT: li a0, 63 ; RV64-V-NEXT: vsrl.vx v24, v16, a0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll @@ -9,7 +9,7 @@ define @vrem_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_nxv8i7: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v8 ; CHECK-NEXT: vsra.vi v8, v8, 1 ; CHECK-NEXT: vmv.v.x v9, a0 @@ -39,7 +39,7 @@ define @vrem_vv_nxv1i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv1i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -63,7 +63,7 @@ define @vrem_vx_nxv1i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_nxv1i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vrem.vx 
v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -89,7 +89,7 @@ define @vrem_vv_nxv2i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -113,7 +113,7 @@ define @vrem_vx_nxv2i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -151,7 +151,7 @@ define @vrem_vv_nxv4i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -175,7 +175,7 @@ define @vrem_vx_nxv4i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_nxv4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -201,7 +201,7 @@ define @vrem_vv_nxv8i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -225,7 +225,7 @@ define @vrem_vx_nxv8i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_nxv8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -251,7 +251,7 @@ define @vrem_vv_nxv16i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -275,7 +275,7 @@ define @vrem_vx_nxv16i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_nxv16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -301,7 +301,7 @@ define @vrem_vv_nxv32i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv32i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -325,7 +325,7 @@ define @vrem_vx_nxv32i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_nxv32i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -351,7 +351,7 @@ define @vrem_vv_nxv64i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: 
vrem_vv_nxv64i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -375,7 +375,7 @@ define @vrem_vx_nxv64i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_nxv64i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -401,7 +401,7 @@ define @vrem_vv_nxv1i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv1i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -425,7 +425,7 @@ define @vrem_vx_nxv1i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_nxv1i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -451,7 +451,7 @@ define @vrem_vv_nxv2i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -475,7 +475,7 @@ define @vrem_vx_nxv2i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -501,7 +501,7 @@ define @vrem_vv_nxv4i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -525,7 +525,7 @@ define @vrem_vx_nxv4i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_nxv4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -551,7 +551,7 @@ define @vrem_vv_nxv8i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -575,7 +575,7 @@ define @vrem_vx_nxv8i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_nxv8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -601,7 +601,7 @@ define @vrem_vv_nxv16i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; 
CHECK-NEXT: vrem.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -625,7 +625,7 @@ define @vrem_vx_nxv16i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_nxv16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -651,7 +651,7 @@ define @vrem_vv_nxv32i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv32i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -675,7 +675,7 @@ define @vrem_vx_nxv32i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_nxv32i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -701,7 +701,7 @@ define @vrem_vv_nxv1i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv1i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -725,7 +725,7 @@ define @vrem_vx_nxv1i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_nxv1i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -751,7 +751,7 @@ define @vrem_vv_nxv2i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -775,7 +775,7 @@ define @vrem_vx_nxv2i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -801,7 +801,7 @@ define @vrem_vv_nxv4i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -825,7 +825,7 @@ define @vrem_vx_nxv4i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_nxv4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -851,7 +851,7 @@ define @vrem_vv_nxv8i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -875,7 +875,7 @@ define 
@vrem_vx_nxv8i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_nxv8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -901,7 +901,7 @@ define @vrem_vv_nxv16i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -925,7 +925,7 @@ define @vrem_vx_nxv16i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vx_nxv16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vrem.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -951,7 +951,7 @@ define @vrem_vv_nxv1i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv1i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -968,7 +968,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v9, v0.t @@ -994,16 +994,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vrem.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vx_nxv1i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vrem.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1029,7 +1029,7 @@ define @vrem_vv_nxv2i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1046,7 +1046,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v10, v0.t @@ -1072,16 +1072,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vrem.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vx_nxv2i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; 
RV64-NEXT: vrem.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1107,7 +1107,7 @@ define @vrem_vv_nxv4i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1124,7 +1124,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v12, v0.t @@ -1150,16 +1150,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vrem.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vx_nxv4i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vrem.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1185,7 +1185,7 @@ define @vrem_vv_nxv8i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vrem_vv_nxv8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vrem.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1202,7 +1202,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vrem.vv v8, v8, v16, v0.t @@ -1228,16 +1228,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vrem.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vrem_vx_nxv8i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vrem.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vremu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vremu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma 
; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vremu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vremu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vremu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vremu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vremu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vremu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vremu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vremu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vremu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vremu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vremu_vv_nxv32i16_nxv32i16_nxv32i16( %0, 
%1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vremu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vremu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vremu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vremu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vremu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vremu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vremu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vremu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vremu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vremu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1095,7 +1095,7 @@ define @intrinsic_vremu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vremu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vremu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vremu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define @intrinsic_vremu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vremu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vremu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vremu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vremu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define @intrinsic_vremu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define 
@intrinsic_vremu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vremu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vremu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vremu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ define @intrinsic_vremu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ define @intrinsic_vremu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vremu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1898,7 +1898,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1957,7 +1957,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vremu.vv v8, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -2016,7 +2016,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vremu.vv v8, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -2075,7 +2075,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vremu.vv 
v8, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vremu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vremu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vremu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vremu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vremu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vremu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vremu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vremu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vremu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vremu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, 
e16, m1, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vremu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vremu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vremu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vremu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vremu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vremu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vremu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vremu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vremu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vremu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define 
@intrinsic_vremu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vremu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vremu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1095,7 +1095,7 @@ define @intrinsic_vremu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vremu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vremu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vremu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define @intrinsic_vremu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vremu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vremu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vremu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, 
e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vremu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define @intrinsic_vremu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vremu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vremu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vremu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vremu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ define @intrinsic_vremu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ define @intrinsic_vremu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vremu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1894,7 +1894,7 @@ define @intrinsic_vremu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1941,7 
+1941,7 @@ define @intrinsic_vremu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1988,7 +1988,7 @@ define @intrinsic_vremu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2035,7 +2035,7 @@ define @intrinsic_vremu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vremu_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vremu-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vremu-sdnode.ll @@ -7,7 +7,7 @@ define @vremu_vv_nxv1i8( %va, %vb) { ; CHECK-LABEL: vremu_vv_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = urem %va, %vb @@ -17,7 +17,7 @@ define @vremu_vx_nxv1i8( %va, i8 signext %b) { ; CHECK-LABEL: vremu_vx_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -30,7 +30,7 @@ ; CHECK-LABEL: vremu_vi_nxv1i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 33 -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmulhu.vx v9, v8, a0 ; CHECK-NEXT: vsrl.vi v9, v9, 5 ; CHECK-NEXT: li a0, -7 @@ -45,7 +45,7 @@ define @vremu_vv_nxv2i8( %va, %vb) { ; CHECK-LABEL: vremu_vv_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = urem %va, %vb @@ -55,7 +55,7 @@ define @vremu_vx_nxv2i8( %va, i8 signext %b) { ; CHECK-LABEL: vremu_vx_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -68,7 +68,7 @@ ; CHECK-LABEL: vremu_vi_nxv2i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 33 -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmulhu.vx v9, v8, a0 ; CHECK-NEXT: vsrl.vi v9, v9, 5 ; CHECK-NEXT: li a0, -7 @@ -83,7 +83,7 @@ define @vremu_vv_nxv4i8( %va, %vb) { ; CHECK-LABEL: vremu_vv_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = urem %va, %vb @@ -93,7 +93,7 @@ define @vremu_vx_nxv4i8( %va, i8 signext %b) { ; CHECK-LABEL: vremu_vx_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -106,7 +106,7 @@ ; 
CHECK-LABEL: vremu_vi_nxv4i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 33 -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmulhu.vx v9, v8, a0 ; CHECK-NEXT: vsrl.vi v9, v9, 5 ; CHECK-NEXT: li a0, -7 @@ -121,7 +121,7 @@ define @vremu_vv_nxv8i8( %va, %vb) { ; CHECK-LABEL: vremu_vv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = urem %va, %vb @@ -131,7 +131,7 @@ define @vremu_vx_nxv8i8( %va, i8 signext %b) { ; CHECK-LABEL: vremu_vx_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -144,7 +144,7 @@ ; CHECK-LABEL: vremu_vi_nxv8i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 33 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmulhu.vx v9, v8, a0 ; CHECK-NEXT: vsrl.vi v9, v9, 5 ; CHECK-NEXT: li a0, -7 @@ -159,7 +159,7 @@ define @vremu_vv_nxv16i8( %va, %vb) { ; CHECK-LABEL: vremu_vv_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = urem %va, %vb @@ -169,7 +169,7 @@ define @vremu_vx_nxv16i8( %va, i8 signext %b) { ; CHECK-LABEL: vremu_vx_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -182,7 +182,7 @@ ; CHECK-LABEL: vremu_vi_nxv16i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 33 -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vmulhu.vx v10, v8, a0 ; CHECK-NEXT: vsrl.vi v10, v10, 5 ; CHECK-NEXT: li a0, -7 @@ -197,7 +197,7 @@ define @vremu_vv_nxv32i8( %va, %vb) { ; CHECK-LABEL: vremu_vv_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = urem %va, %vb @@ -207,7 +207,7 @@ define @vremu_vx_nxv32i8( %va, i8 signext %b) { ; CHECK-LABEL: vremu_vx_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -220,7 +220,7 @@ ; CHECK-LABEL: vremu_vi_nxv32i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 33 -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vmulhu.vx v12, v8, a0 ; CHECK-NEXT: vsrl.vi v12, v12, 5 ; CHECK-NEXT: li a0, -7 @@ -235,7 +235,7 @@ define @vremu_vv_nxv64i8( %va, %vb) { ; CHECK-LABEL: vremu_vv_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = urem %va, %vb @@ -245,7 +245,7 @@ define @vremu_vx_nxv64i8( %va, i8 signext %b) { ; CHECK-LABEL: vremu_vx_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -258,7 +258,7 @@ ; CHECK-LABEL: vremu_vi_nxv64i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: 
li a0, 33 -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vmulhu.vx v16, v8, a0 ; CHECK-NEXT: vsrl.vi v16, v16, 5 ; CHECK-NEXT: li a0, -7 @@ -273,7 +273,7 @@ define @vremu_vv_nxv1i16( %va, %vb) { ; CHECK-LABEL: vremu_vv_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = urem %va, %vb @@ -283,7 +283,7 @@ define @vremu_vx_nxv1i16( %va, i16 signext %b) { ; CHECK-LABEL: vremu_vx_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -297,7 +297,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 2 ; RV32-NEXT: addi a0, a0, 1 -; RV32-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; RV32-NEXT: vmulhu.vx v9, v8, a0 ; RV32-NEXT: vsrl.vi v9, v9, 13 ; RV32-NEXT: li a0, -7 @@ -308,7 +308,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 2 ; RV64-NEXT: addiw a0, a0, 1 -; RV64-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; RV64-NEXT: vmulhu.vx v9, v8, a0 ; RV64-NEXT: vsrl.vi v9, v9, 13 ; RV64-NEXT: li a0, -7 @@ -323,7 +323,7 @@ define @vremu_vv_nxv2i16( %va, %vb) { ; CHECK-LABEL: vremu_vv_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = urem %va, %vb @@ -333,7 +333,7 @@ define @vremu_vx_nxv2i16( %va, i16 signext %b) { ; CHECK-LABEL: vremu_vx_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -347,7 +347,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 2 ; RV32-NEXT: addi a0, a0, 1 -; RV32-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; RV32-NEXT: vmulhu.vx v9, v8, a0 ; RV32-NEXT: vsrl.vi v9, v9, 13 ; RV32-NEXT: li a0, -7 @@ -358,7 +358,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 2 ; RV64-NEXT: addiw a0, a0, 1 -; RV64-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; RV64-NEXT: vmulhu.vx v9, v8, a0 ; RV64-NEXT: vsrl.vi v9, v9, 13 ; RV64-NEXT: li a0, -7 @@ -373,7 +373,7 @@ define @vremu_vv_nxv4i16( %va, %vb) { ; CHECK-LABEL: vremu_vv_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = urem %va, %vb @@ -383,7 +383,7 @@ define @vremu_vx_nxv4i16( %va, i16 signext %b) { ; CHECK-LABEL: vremu_vx_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -397,7 +397,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 2 ; RV32-NEXT: addi a0, a0, 1 -; RV32-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; RV32-NEXT: vmulhu.vx v9, v8, a0 ; RV32-NEXT: vsrl.vi v9, v9, 13 ; RV32-NEXT: li a0, -7 @@ -408,7 +408,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 2 ; RV64-NEXT: addiw a0, a0, 1 -; RV64-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; 
RV64-NEXT: vmulhu.vx v9, v8, a0 ; RV64-NEXT: vsrl.vi v9, v9, 13 ; RV64-NEXT: li a0, -7 @@ -423,7 +423,7 @@ define @vremu_vv_nxv8i16( %va, %vb) { ; CHECK-LABEL: vremu_vv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = urem %va, %vb @@ -433,7 +433,7 @@ define @vremu_vx_nxv8i16( %va, i16 signext %b) { ; CHECK-LABEL: vremu_vx_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -447,7 +447,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 2 ; RV32-NEXT: addi a0, a0, 1 -; RV32-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV32-NEXT: vmulhu.vx v10, v8, a0 ; RV32-NEXT: vsrl.vi v10, v10, 13 ; RV32-NEXT: li a0, -7 @@ -458,7 +458,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 2 ; RV64-NEXT: addiw a0, a0, 1 -; RV64-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; RV64-NEXT: vmulhu.vx v10, v8, a0 ; RV64-NEXT: vsrl.vi v10, v10, 13 ; RV64-NEXT: li a0, -7 @@ -473,7 +473,7 @@ define @vremu_vv_nxv16i16( %va, %vb) { ; CHECK-LABEL: vremu_vv_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = urem %va, %vb @@ -483,7 +483,7 @@ define @vremu_vx_nxv16i16( %va, i16 signext %b) { ; CHECK-LABEL: vremu_vx_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -497,7 +497,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 2 ; RV32-NEXT: addi a0, a0, 1 -; RV32-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; RV32-NEXT: vmulhu.vx v12, v8, a0 ; RV32-NEXT: vsrl.vi v12, v12, 13 ; RV32-NEXT: li a0, -7 @@ -508,7 +508,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 2 ; RV64-NEXT: addiw a0, a0, 1 -; RV64-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; RV64-NEXT: vmulhu.vx v12, v8, a0 ; RV64-NEXT: vsrl.vi v12, v12, 13 ; RV64-NEXT: li a0, -7 @@ -523,7 +523,7 @@ define @vremu_vv_nxv32i16( %va, %vb) { ; CHECK-LABEL: vremu_vv_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = urem %va, %vb @@ -533,7 +533,7 @@ define @vremu_vx_nxv32i16( %va, i16 signext %b) { ; CHECK-LABEL: vremu_vx_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -547,7 +547,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 2 ; RV32-NEXT: addi a0, a0, 1 -; RV32-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; RV32-NEXT: vmulhu.vx v16, v8, a0 ; RV32-NEXT: vsrl.vi v16, v16, 13 ; RV32-NEXT: li a0, -7 @@ -558,7 +558,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 2 ; RV64-NEXT: addiw a0, a0, 1 -; RV64-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; RV64-NEXT: vmulhu.vx v16, v8, a0 ; RV64-NEXT: vsrl.vi v16, v16, 13 ; RV64-NEXT: li a0, -7 @@ -573,7 +573,7 
@@ define @vremu_vv_nxv1i32( %va, %vb) { ; CHECK-LABEL: vremu_vv_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = urem %va, %vb @@ -583,7 +583,7 @@ define @vremu_vx_nxv1i32( %va, i32 signext %b) { ; CHECK-LABEL: vremu_vx_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -597,7 +597,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 131072 ; RV32-NEXT: addi a0, a0, 1 -; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; RV32-NEXT: vmulhu.vx v9, v8, a0 ; RV32-NEXT: vsrl.vi v9, v9, 29 ; RV32-NEXT: li a0, -7 @@ -608,7 +608,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 131072 ; RV64-NEXT: addiw a0, a0, 1 -; RV64-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; RV64-NEXT: vmulhu.vx v9, v8, a0 ; RV64-NEXT: vsrl.vi v9, v9, 29 ; RV64-NEXT: li a0, -7 @@ -623,7 +623,7 @@ define @vremu_vv_nxv2i32( %va, %vb) { ; CHECK-LABEL: vremu_vv_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = urem %va, %vb @@ -633,7 +633,7 @@ define @vremu_vx_nxv2i32( %va, i32 signext %b) { ; CHECK-LABEL: vremu_vx_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -647,7 +647,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 131072 ; RV32-NEXT: addi a0, a0, 1 -; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; RV32-NEXT: vmulhu.vx v9, v8, a0 ; RV32-NEXT: vsrl.vi v9, v9, 29 ; RV32-NEXT: li a0, -7 @@ -658,7 +658,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 131072 ; RV64-NEXT: addiw a0, a0, 1 -; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; RV64-NEXT: vmulhu.vx v9, v8, a0 ; RV64-NEXT: vsrl.vi v9, v9, 29 ; RV64-NEXT: li a0, -7 @@ -673,7 +673,7 @@ define @vremu_vv_nxv4i32( %va, %vb) { ; CHECK-LABEL: vremu_vv_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = urem %va, %vb @@ -683,7 +683,7 @@ define @vremu_vx_nxv4i32( %va, i32 signext %b) { ; CHECK-LABEL: vremu_vx_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -697,7 +697,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 131072 ; RV32-NEXT: addi a0, a0, 1 -; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; RV32-NEXT: vmulhu.vx v10, v8, a0 ; RV32-NEXT: vsrl.vi v10, v10, 29 ; RV32-NEXT: li a0, -7 @@ -708,7 +708,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 131072 ; RV64-NEXT: addiw a0, a0, 1 -; RV64-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; RV64-NEXT: vmulhu.vx v10, v8, a0 ; RV64-NEXT: vsrl.vi v10, v10, 29 ; RV64-NEXT: li a0, -7 @@ -723,7 +723,7 @@ define @vremu_vv_nxv8i32( %va, %vb) { ; CHECK-LABEL: vremu_vv_nxv8i32: ; CHECK: # 
%bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = urem %va, %vb @@ -733,7 +733,7 @@ define @vremu_vx_nxv8i32( %va, i32 signext %b) { ; CHECK-LABEL: vremu_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -747,7 +747,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 131072 ; RV32-NEXT: addi a0, a0, 1 -; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vmulhu.vx v12, v8, a0 ; RV32-NEXT: vsrl.vi v12, v12, 29 ; RV32-NEXT: li a0, -7 @@ -758,7 +758,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 131072 ; RV64-NEXT: addiw a0, a0, 1 -; RV64-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV64-NEXT: vmulhu.vx v12, v8, a0 ; RV64-NEXT: vsrl.vi v12, v12, 29 ; RV64-NEXT: li a0, -7 @@ -773,7 +773,7 @@ define @vremu_vv_nxv16i32( %va, %vb) { ; CHECK-LABEL: vremu_vv_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = urem %va, %vb @@ -783,7 +783,7 @@ define @vremu_vx_nxv16i32( %va, i32 signext %b) { ; CHECK-LABEL: vremu_vx_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -797,7 +797,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: lui a0, 131072 ; RV32-NEXT: addi a0, a0, 1 -; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; RV32-NEXT: vmulhu.vx v16, v8, a0 ; RV32-NEXT: vsrl.vi v16, v16, 29 ; RV32-NEXT: li a0, -7 @@ -808,7 +808,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: lui a0, 131072 ; RV64-NEXT: addiw a0, a0, 1 -; RV64-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; RV64-NEXT: vmulhu.vx v16, v8, a0 ; RV64-NEXT: vsrl.vi v16, v16, 29 ; RV64-NEXT: li a0, -7 @@ -823,7 +823,7 @@ define @vremu_vv_nxv1i64( %va, %vb) { ; CHECK-LABEL: vremu_vv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = urem %va, %vb @@ -838,7 +838,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vremu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -846,7 +846,7 @@ ; ; RV64-LABEL: vremu_vx_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV64-NEXT: vremu.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -865,7 +865,7 @@ ; RV32-V-NEXT: li a0, 1 ; RV32-V-NEXT: sw a0, 8(sp) ; RV32-V-NEXT: addi a0, sp, 8 -; RV32-V-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-V-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-V-NEXT: vlse64.v v9, (a0), zero ; RV32-V-NEXT: vmulhu.vv v9, v8, v9 ; RV32-V-NEXT: li a0, 61 @@ -878,7 +878,7 @@ ; ZVE64X-LABEL: vremu_vi_nxv1i64_0: ; ZVE64X: # %bb.0: ; ZVE64X-NEXT: li a0, -7 -; ZVE64X-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; ZVE64X-NEXT: vsetvli a1, zero, e64, m1, ta, ma 
; ZVE64X-NEXT: vremu.vx v8, v8, a0 ; ZVE64X-NEXT: ret ; @@ -887,7 +887,7 @@ ; RV64-V-NEXT: li a0, 1 ; RV64-V-NEXT: slli a0, a0, 61 ; RV64-V-NEXT: addi a0, a0, 1 -; RV64-V-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-V-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV64-V-NEXT: vmulhu.vx v9, v8, a0 ; RV64-V-NEXT: li a0, 61 ; RV64-V-NEXT: vsrl.vx v9, v9, a0 @@ -904,7 +904,7 @@ define @vremu_vi_nxv1i64_1( %va) { ; CHECK-LABEL: vremu_vi_nxv1i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i64 16, i32 0 @@ -918,7 +918,7 @@ ; CHECK-LABEL: vremu_vi_nxv1i64_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vmv.v.x v10, a0 ; CHECK-NEXT: vsll.vv v9, v10, v9 ; CHECK-NEXT: vadd.vi v9, v9, -1 @@ -934,7 +934,7 @@ define @vremu_vv_nxv2i64( %va, %vb) { ; CHECK-LABEL: vremu_vv_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = urem %va, %vb @@ -949,7 +949,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vremu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -957,7 +957,7 @@ ; ; RV64-LABEL: vremu_vx_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV64-NEXT: vremu.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -976,7 +976,7 @@ ; RV32-V-NEXT: li a0, 1 ; RV32-V-NEXT: sw a0, 8(sp) ; RV32-V-NEXT: addi a0, sp, 8 -; RV32-V-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-V-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-V-NEXT: vlse64.v v10, (a0), zero ; RV32-V-NEXT: vmulhu.vv v10, v8, v10 ; RV32-V-NEXT: li a0, 61 @@ -989,7 +989,7 @@ ; ZVE64X-LABEL: vremu_vi_nxv2i64_0: ; ZVE64X: # %bb.0: ; ZVE64X-NEXT: li a0, -7 -; ZVE64X-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; ZVE64X-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; ZVE64X-NEXT: vremu.vx v8, v8, a0 ; ZVE64X-NEXT: ret ; @@ -998,7 +998,7 @@ ; RV64-V-NEXT: li a0, 1 ; RV64-V-NEXT: slli a0, a0, 61 ; RV64-V-NEXT: addi a0, a0, 1 -; RV64-V-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-V-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV64-V-NEXT: vmulhu.vx v10, v8, a0 ; RV64-V-NEXT: li a0, 61 ; RV64-V-NEXT: vsrl.vx v10, v10, a0 @@ -1015,7 +1015,7 @@ define @vremu_vi_nxv2i64_1( %va) { ; CHECK-LABEL: vremu_vi_nxv2i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i64 16, i32 0 @@ -1029,7 +1029,7 @@ ; CHECK-LABEL: vremu_vi_nxv2i64_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; CHECK-NEXT: vmv.v.x v12, a0 ; CHECK-NEXT: vsll.vv v10, v12, v10 ; CHECK-NEXT: vadd.vi v10, v10, -1 @@ -1045,7 +1045,7 @@ define @vremu_vv_nxv4i64( %va, %vb) { ; CHECK-LABEL: vremu_vv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = urem %va, %vb @@ -1060,7 +1060,7 
@@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vremu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -1068,7 +1068,7 @@ ; ; RV64-LABEL: vremu_vx_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV64-NEXT: vremu.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -1087,7 +1087,7 @@ ; RV32-V-NEXT: li a0, 1 ; RV32-V-NEXT: sw a0, 8(sp) ; RV32-V-NEXT: addi a0, sp, 8 -; RV32-V-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-V-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-V-NEXT: vlse64.v v12, (a0), zero ; RV32-V-NEXT: vmulhu.vv v12, v8, v12 ; RV32-V-NEXT: li a0, 61 @@ -1100,7 +1100,7 @@ ; ZVE64X-LABEL: vremu_vi_nxv4i64_0: ; ZVE64X: # %bb.0: ; ZVE64X-NEXT: li a0, -7 -; ZVE64X-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; ZVE64X-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; ZVE64X-NEXT: vremu.vx v8, v8, a0 ; ZVE64X-NEXT: ret ; @@ -1109,7 +1109,7 @@ ; RV64-V-NEXT: li a0, 1 ; RV64-V-NEXT: slli a0, a0, 61 ; RV64-V-NEXT: addi a0, a0, 1 -; RV64-V-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-V-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV64-V-NEXT: vmulhu.vx v12, v8, a0 ; RV64-V-NEXT: li a0, 61 ; RV64-V-NEXT: vsrl.vx v12, v12, a0 @@ -1126,7 +1126,7 @@ define @vremu_vi_nxv4i64_1( %va) { ; CHECK-LABEL: vremu_vi_nxv4i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i64 16, i32 0 @@ -1140,7 +1140,7 @@ ; CHECK-LABEL: vremu_vi_nxv4i64_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; CHECK-NEXT: vmv.v.x v16, a0 ; CHECK-NEXT: vsll.vv v12, v16, v12 ; CHECK-NEXT: vadd.vi v12, v12, -1 @@ -1156,7 +1156,7 @@ define @vremu_vv_nxv8i64( %va, %vb) { ; CHECK-LABEL: vremu_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = urem %va, %vb @@ -1171,7 +1171,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vremu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -1179,7 +1179,7 @@ ; ; RV64-LABEL: vremu_vx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vremu.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -1198,7 +1198,7 @@ ; RV32-V-NEXT: li a0, 1 ; RV32-V-NEXT: sw a0, 8(sp) ; RV32-V-NEXT: addi a0, sp, 8 -; RV32-V-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-V-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-V-NEXT: vlse64.v v16, (a0), zero ; RV32-V-NEXT: vmulhu.vv v16, v8, v16 ; RV32-V-NEXT: li a0, 61 @@ -1211,7 +1211,7 @@ ; ZVE64X-LABEL: vremu_vi_nxv8i64_0: ; ZVE64X: # %bb.0: ; ZVE64X-NEXT: li a0, -7 -; ZVE64X-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; ZVE64X-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; ZVE64X-NEXT: vremu.vx v8, v8, a0 ; ZVE64X-NEXT: ret ; @@ -1220,7 +1220,7 @@ ; RV64-V-NEXT: li a0, 1 ; RV64-V-NEXT: slli a0, a0, 61 ; RV64-V-NEXT: addi a0, a0, 1 -; RV64-V-NEXT: 
vsetvli a1, zero, e64, m8, ta, mu +; RV64-V-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-V-NEXT: vmulhu.vx v16, v8, a0 ; RV64-V-NEXT: li a0, 61 ; RV64-V-NEXT: vsrl.vx v16, v16, a0 @@ -1237,7 +1237,7 @@ define @vremu_vi_nxv8i64_1( %va) { ; CHECK-LABEL: vremu_vi_nxv8i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 15 ; CHECK-NEXT: ret %head = insertelement poison, i64 16, i32 0 @@ -1251,7 +1251,7 @@ ; CHECK-LABEL: vremu_vi_nxv8i64_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; CHECK-NEXT: vmv.v.x v24, a0 ; CHECK-NEXT: vsll.vv v16, v24, v16 ; CHECK-NEXT: vadd.vi v16, v16, -1 diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll @@ -10,7 +10,7 @@ ; CHECK-LABEL: vremu_vx_nxv8i7: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 127 -; CHECK-NEXT: vsetvli a3, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e8, m1, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a2 ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vand.vx v9, v9, a2 @@ -38,7 +38,7 @@ define @vremu_vv_nxv1i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv1i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -62,7 +62,7 @@ define @vremu_vx_nxv1i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv1i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -88,7 +88,7 @@ define @vremu_vv_nxv2i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -112,7 +112,7 @@ define @vremu_vx_nxv2i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -150,7 +150,7 @@ define @vremu_vv_nxv4i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -174,7 +174,7 @@ define @vremu_vx_nxv4i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -200,7 +200,7 @@ define @vremu_vv_nxv8i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, 
v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -224,7 +224,7 @@ define @vremu_vx_nxv8i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -250,7 +250,7 @@ define @vremu_vv_nxv16i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -274,7 +274,7 @@ define @vremu_vx_nxv16i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -300,7 +300,7 @@ define @vremu_vv_nxv32i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv32i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -324,7 +324,7 @@ define @vremu_vx_nxv32i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv32i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -350,7 +350,7 @@ define @vremu_vv_nxv64i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv64i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -374,7 +374,7 @@ define @vremu_vx_nxv64i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv64i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -400,7 +400,7 @@ define @vremu_vv_nxv1i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv1i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -424,7 +424,7 @@ define @vremu_vx_nxv1i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv1i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -450,7 +450,7 @@ define @vremu_vv_nxv2i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -474,7 +474,7 @@ define @vremu_vx_nxv2i16_unmasked( %va, i16 %b, i32 
zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -500,7 +500,7 @@ define @vremu_vv_nxv4i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -524,7 +524,7 @@ define @vremu_vx_nxv4i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -550,7 +550,7 @@ define @vremu_vv_nxv8i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -574,7 +574,7 @@ define @vremu_vx_nxv8i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -600,7 +600,7 @@ define @vremu_vv_nxv16i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -624,7 +624,7 @@ define @vremu_vx_nxv16i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -650,7 +650,7 @@ define @vremu_vv_nxv32i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv32i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -674,7 +674,7 @@ define @vremu_vx_nxv32i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv32i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -700,7 +700,7 @@ define @vremu_vv_nxv1i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv1i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -724,7 +724,7 @@ define @vremu_vx_nxv1i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv1i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, 
mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -750,7 +750,7 @@ define @vremu_vv_nxv2i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -774,7 +774,7 @@ define @vremu_vx_nxv2i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -800,7 +800,7 @@ define @vremu_vv_nxv4i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -824,7 +824,7 @@ define @vremu_vx_nxv4i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -850,7 +850,7 @@ define @vremu_vv_nxv8i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -874,7 +874,7 @@ define @vremu_vx_nxv8i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -900,7 +900,7 @@ define @vremu_vv_nxv16i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -924,7 +924,7 @@ define @vremu_vx_nxv16i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vx_nxv16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vremu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -950,7 +950,7 @@ define @vremu_vv_nxv1i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv1i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -967,7 +967,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vremu.vv v8, 
v8, v9, v0.t @@ -993,16 +993,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vremu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vx_nxv1i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vremu.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1028,7 +1028,7 @@ define @vremu_vv_nxv2i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1045,7 +1045,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vremu.vv v8, v8, v10, v0.t @@ -1071,16 +1071,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vremu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vx_nxv2i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vremu.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1106,7 +1106,7 @@ define @vremu_vv_nxv4i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1123,7 +1123,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vremu.vv v8, v8, v12, v0.t @@ -1149,16 +1149,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vremu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vx_nxv4i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vremu.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1184,7 +1184,7 @@ define @vremu_vv_nxv8i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vremu_vv_nxv8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; 
CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vremu.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1201,7 +1201,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vremu.vv v8, v8, v16, v0.t @@ -1227,16 +1227,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vremu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vremu_vx_nxv8i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vremu.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vrgather.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -58,7 +58,7 @@ define @intrinsic_vrgather_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vrgather.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -106,7 +106,7 @@ define @intrinsic_vrgather_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vrgather.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -154,7 +154,7 @@ define @intrinsic_vrgather_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vrgather.vv v10, v8, v9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -202,7 +202,7 @@ define @intrinsic_vrgather_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vrgather.vv v12, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -250,7 +250,7 @@ define @intrinsic_vrgather_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vrgather.vv v16, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v16 ; 
CHECK-NEXT: ret @@ -298,7 +298,7 @@ define @intrinsic_vrgather_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vrgather.vv v24, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret @@ -347,7 +347,7 @@ define @intrinsic_vrgather_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vrgather.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -395,7 +395,7 @@ define @intrinsic_vrgather_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vrgather.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -443,7 +443,7 @@ define @intrinsic_vrgather_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vrgather.vv v10, v8, v9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -491,7 +491,7 @@ define @intrinsic_vrgather_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vrgather.vv v12, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -539,7 +539,7 @@ define @intrinsic_vrgather_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vrgather.vv v16, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -587,7 +587,7 @@ define @intrinsic_vrgather_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vrgather.vv v24, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret @@ -636,7 +636,7 @@ define @intrinsic_vrgather_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vrgather.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -684,7 +684,7 @@ define @intrinsic_vrgather_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vrgather.vv v10, v8, v9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -732,7 +732,7 @@ define @intrinsic_vrgather_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vrgather_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vrgather.vv v12, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -780,7 +780,7 @@ define @intrinsic_vrgather_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vrgather.vv v16, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -828,7 +828,7 @@ define @intrinsic_vrgather_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vrgather.vv v24, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret @@ -877,7 +877,7 @@ define @intrinsic_vrgather_vv_nxv1f16_nxv1f16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vrgather.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -925,7 +925,7 @@ define @intrinsic_vrgather_vv_nxv2f16_nxv2f16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vrgather.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -973,7 +973,7 @@ define @intrinsic_vrgather_vv_nxv4f16_nxv4f16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vrgather.vv v10, v8, v9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1021,7 +1021,7 @@ define @intrinsic_vrgather_vv_nxv8f16_nxv8f16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vrgather.vv v12, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1069,7 +1069,7 @@ define @intrinsic_vrgather_vv_nxv16f16_nxv16f16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vrgather.vv v16, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1117,7 +1117,7 @@ define @intrinsic_vrgather_vv_nxv32f16_nxv32f16_nxv32i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vrgather.vv v24, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret @@ -1166,7 +1166,7 @@ define @intrinsic_vrgather_vv_nxv1f32_nxv1f32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: 
vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vrgather.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1214,7 +1214,7 @@ define @intrinsic_vrgather_vv_nxv2f32_nxv2f32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vrgather.vv v10, v8, v9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1262,7 +1262,7 @@ define @intrinsic_vrgather_vv_nxv4f32_nxv4f32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vrgather.vv v12, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1310,7 +1310,7 @@ define @intrinsic_vrgather_vv_nxv8f32_nxv8f32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vrgather.vv v16, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1358,7 +1358,7 @@ define @intrinsic_vrgather_vv_nxv16f32_nxv16f32_nxv16i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vrgather.vv v24, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret @@ -1407,7 +1407,7 @@ define @intrinsic_vrgather_vv_nxv1f64_nxv1f64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vrgather.vv v10, v8, v9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1455,7 +1455,7 @@ define @intrinsic_vrgather_vv_nxv2f64_nxv2f64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vrgather.vv v12, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1503,7 +1503,7 @@ define @intrinsic_vrgather_vv_nxv4f64_nxv4f64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vrgather.vv v16, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1551,7 +1551,7 @@ define @intrinsic_vrgather_vv_nxv8f64_nxv8f64_nxv8i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vrgather.vv v24, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret @@ -1600,7 +1600,7 @@ define @intrinsic_vrgather_vx_nxv1i8_nxv1i8_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i8_nxv1i8_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vrgather.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1648,7 +1648,7 @@ define 
@intrinsic_vrgather_vx_nxv2i8_nxv2i8_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i8_nxv2i8_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vrgather.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1696,7 +1696,7 @@ define @intrinsic_vrgather_vx_nxv4i8_nxv4i8_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i8_nxv4i8_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vrgather.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1744,7 +1744,7 @@ define @intrinsic_vrgather_vx_nxv8i8_nxv8i8_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i8_nxv8i8_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vrgather.vx v9, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -1792,7 +1792,7 @@ define @intrinsic_vrgather_vx_nxv16i8_nxv16i8_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i8_nxv16i8_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vrgather.vx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1840,7 +1840,7 @@ define @intrinsic_vrgather_vx_nxv32i8_nxv32i8_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv32i8_nxv32i8_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vrgather.vx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1888,7 +1888,7 @@ define @intrinsic_vrgather_vx_nxv64i8_nxv64i8_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv64i8_nxv64i8_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vrgather.vx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1936,7 +1936,7 @@ define @intrinsic_vrgather_vx_nxv1i16_nxv1i16_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i16_nxv1i16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vrgather.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1984,7 +1984,7 @@ define @intrinsic_vrgather_vx_nxv2i16_nxv2i16_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i16_nxv2i16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vrgather.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -2032,7 +2032,7 @@ define @intrinsic_vrgather_vx_nxv4i16_nxv4i16_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i16_nxv4i16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vrgather.vx v9, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -2080,7 +2080,7 @@ define @intrinsic_vrgather_vx_nxv8i16_nxv8i16_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i16_nxv8i16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: 
vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vrgather.vx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -2128,7 +2128,7 @@ define @intrinsic_vrgather_vx_nxv16i16_nxv16i16_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i16_nxv16i16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vrgather.vx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -2176,7 +2176,7 @@ define @intrinsic_vrgather_vx_nxv32i16_nxv32i16_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv32i16_nxv32i16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vrgather.vx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -2224,7 +2224,7 @@ define @intrinsic_vrgather_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vrgather.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -2272,7 +2272,7 @@ define @intrinsic_vrgather_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vrgather.vx v9, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -2320,7 +2320,7 @@ define @intrinsic_vrgather_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vrgather.vx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -2368,7 +2368,7 @@ define @intrinsic_vrgather_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vrgather.vx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -2416,7 +2416,7 @@ define @intrinsic_vrgather_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vrgather.vx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -2464,7 +2464,7 @@ define @intrinsic_vrgather_vx_nxv1f16_nxv1f16_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f16_nxv1f16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vrgather.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -2512,7 +2512,7 @@ define @intrinsic_vrgather_vx_nxv2f16_nxv2f16_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f16_nxv2f16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vrgather.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -2560,7 +2560,7 @@ define 
@intrinsic_vrgather_vx_nxv4f16_nxv4f16_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f16_nxv4f16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vrgather.vx v9, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -2608,7 +2608,7 @@ define @intrinsic_vrgather_vx_nxv8f16_nxv8f16_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f16_nxv8f16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vrgather.vx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -2656,7 +2656,7 @@ define @intrinsic_vrgather_vx_nxv16f16_nxv16f16_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv16f16_nxv16f16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vrgather.vx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -2704,7 +2704,7 @@ define @intrinsic_vrgather_vx_nxv32f16_nxv32f16_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv32f16_nxv32f16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vrgather.vx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -2752,7 +2752,7 @@ define @intrinsic_vrgather_vx_nxv1f32_nxv1f32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f32_nxv1f32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vrgather.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -2800,7 +2800,7 @@ define @intrinsic_vrgather_vx_nxv2f32_nxv2f32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f32_nxv2f32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vrgather.vx v9, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -2848,7 +2848,7 @@ define @intrinsic_vrgather_vx_nxv4f32_nxv4f32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f32_nxv4f32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vrgather.vx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -2896,7 +2896,7 @@ define @intrinsic_vrgather_vx_nxv8f32_nxv8f32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f32_nxv8f32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vrgather.vx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -2944,7 +2944,7 @@ define @intrinsic_vrgather_vx_nxv16f32_nxv16f32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv16f32_nxv16f32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vrgather.vx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -2992,7 +2992,7 @@ define @intrinsic_vrgather_vx_nxv1f64_nxv1f64_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f64_nxv1f64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vrgather.vx v9, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -3040,7 +3040,7 @@ define @intrinsic_vrgather_vx_nxv2f64_nxv2f64_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f64_nxv2f64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vrgather.vx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -3088,7 +3088,7 @@ define @intrinsic_vrgather_vx_nxv4f64_nxv4f64_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f64_nxv4f64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vrgather.vx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -3136,7 +3136,7 @@ define @intrinsic_vrgather_vx_nxv8f64_nxv8f64_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f64_nxv8f64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vrgather.vx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -3178,7 +3178,7 @@ define @intrinsic_vrgather_vi_nxv1i8_nxv1i8_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i8_nxv1i8_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vrgather.vi v9, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -3212,7 +3212,7 @@ define @intrinsic_vrgather_vi_nxv2i8_nxv2i8_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i8_nxv2i8_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vrgather.vi v9, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -3246,7 +3246,7 @@ define @intrinsic_vrgather_vi_nxv4i8_nxv4i8_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i8_nxv4i8_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vrgather.vi v9, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -3280,7 +3280,7 @@ define @intrinsic_vrgather_vi_nxv8i8_nxv8i8_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i8_nxv8i8_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vrgather.vi v9, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -3314,7 +3314,7 @@ define @intrinsic_vrgather_vi_nxv16i8_nxv16i8_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i8_nxv16i8_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vrgather.vi v10, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -3348,7 +3348,7 @@ define @intrinsic_vrgather_vi_nxv32i8_nxv32i8_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv32i8_nxv32i8_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vrgather.vi v12, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -3382,7 +3382,7 @@ define @intrinsic_vrgather_vi_nxv64i8_nxv64i8_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: 
intrinsic_vrgather_vi_nxv64i8_nxv64i8_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vrgather.vi v16, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -3416,7 +3416,7 @@ define @intrinsic_vrgather_vi_nxv1i16_nxv1i16_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i16_nxv1i16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vrgather.vi v9, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -3450,7 +3450,7 @@ define @intrinsic_vrgather_vi_nxv2i16_nxv2i16_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i16_nxv2i16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vrgather.vi v9, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -3484,7 +3484,7 @@ define @intrinsic_vrgather_vi_nxv4i16_nxv4i16_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i16_nxv4i16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vrgather.vi v9, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -3518,7 +3518,7 @@ define @intrinsic_vrgather_vi_nxv8i16_nxv8i16_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i16_nxv8i16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vrgather.vi v10, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -3552,7 +3552,7 @@ define @intrinsic_vrgather_vi_nxv16i16_nxv16i16_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i16_nxv16i16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vrgather.vi v12, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -3586,7 +3586,7 @@ define @intrinsic_vrgather_vi_nxv32i16_nxv32i16_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv32i16_nxv32i16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vrgather.vi v16, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -3620,7 +3620,7 @@ define @intrinsic_vrgather_vi_nxv1i32_nxv1i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vrgather.vi v9, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -3654,7 +3654,7 @@ define @intrinsic_vrgather_vi_nxv2i32_nxv2i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vrgather.vi v9, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -3688,7 +3688,7 @@ define @intrinsic_vrgather_vi_nxv4i32_nxv4i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vrgather.vi v10, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -3722,7 +3722,7 @@ 
define @intrinsic_vrgather_vi_nxv8i32_nxv8i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vrgather.vi v12, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -3756,7 +3756,7 @@ define @intrinsic_vrgather_vi_nxv16i32_nxv16i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vrgather.vi v16, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -3790,7 +3790,7 @@ define @intrinsic_vrgather_vi_nxv1f16_nxv1f16_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv1f16_nxv1f16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vrgather.vi v9, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -3824,7 +3824,7 @@ define @intrinsic_vrgather_vi_nxv2f16_nxv2f16_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv2f16_nxv2f16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vrgather.vi v9, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -3858,7 +3858,7 @@ define @intrinsic_vrgather_vi_nxv4f16_nxv4f16_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv4f16_nxv4f16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vrgather.vi v9, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -3892,7 +3892,7 @@ define @intrinsic_vrgather_vi_nxv8f16_nxv8f16_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv8f16_nxv8f16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vrgather.vi v10, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -3926,7 +3926,7 @@ define @intrinsic_vrgather_vi_nxv16f16_nxv16f16_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv16f16_nxv16f16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vrgather.vi v12, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -3960,7 +3960,7 @@ define @intrinsic_vrgather_vi_nxv32f16_nxv32f16_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv32f16_nxv32f16_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vrgather.vi v16, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -3994,7 +3994,7 @@ define @intrinsic_vrgather_vi_nxv1f32_nxv1f32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv1f32_nxv1f32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vrgather.vi v9, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -4028,7 +4028,7 @@ define @intrinsic_vrgather_vi_nxv2f32_nxv2f32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv2f32_nxv2f32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; 
CHECK-NEXT: vrgather.vi v9, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -4062,7 +4062,7 @@ define @intrinsic_vrgather_vi_nxv4f32_nxv4f32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv4f32_nxv4f32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vrgather.vi v10, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -4096,7 +4096,7 @@ define @intrinsic_vrgather_vi_nxv8f32_nxv8f32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv8f32_nxv8f32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vrgather.vi v12, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -4130,7 +4130,7 @@ define @intrinsic_vrgather_vi_nxv16f32_nxv16f32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv16f32_nxv16f32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vrgather.vi v16, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -4164,7 +4164,7 @@ define @intrinsic_vrgather_vi_nxv1f64_nxv1f64_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv1f64_nxv1f64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vrgather.vi v9, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -4198,7 +4198,7 @@ define @intrinsic_vrgather_vi_nxv2f64_nxv2f64_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv2f64_nxv2f64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vrgather.vi v10, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -4232,7 +4232,7 @@ define @intrinsic_vrgather_vi_nxv4f64_nxv4f64_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv4f64_nxv4f64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vrgather.vi v12, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -4266,7 +4266,7 @@ define @intrinsic_vrgather_vi_nxv8f64_nxv8f64_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv8f64_nxv8f64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vrgather.vi v16, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vrgather.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -58,7 +58,7 @@ define @intrinsic_vrgather_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vrgather.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 
; CHECK-NEXT: ret @@ -106,7 +106,7 @@ define @intrinsic_vrgather_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vrgather.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -154,7 +154,7 @@ define @intrinsic_vrgather_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vrgather.vv v10, v8, v9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -202,7 +202,7 @@ define @intrinsic_vrgather_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vrgather.vv v12, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -250,7 +250,7 @@ define @intrinsic_vrgather_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vrgather.vv v16, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -298,7 +298,7 @@ define @intrinsic_vrgather_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vrgather.vv v24, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret @@ -347,7 +347,7 @@ define @intrinsic_vrgather_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vrgather.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -395,7 +395,7 @@ define @intrinsic_vrgather_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vrgather.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -443,7 +443,7 @@ define @intrinsic_vrgather_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vrgather.vv v10, v8, v9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -491,7 +491,7 @@ define @intrinsic_vrgather_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vrgather.vv v12, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -539,7 +539,7 @@ define @intrinsic_vrgather_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv16i16_nxv16i16_nxv16i16: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vrgather.vv v16, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -587,7 +587,7 @@ define @intrinsic_vrgather_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vrgather.vv v24, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret @@ -636,7 +636,7 @@ define @intrinsic_vrgather_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vrgather.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -684,7 +684,7 @@ define @intrinsic_vrgather_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vrgather.vv v10, v8, v9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -732,7 +732,7 @@ define @intrinsic_vrgather_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vrgather.vv v12, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -780,7 +780,7 @@ define @intrinsic_vrgather_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vrgather.vv v16, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -828,7 +828,7 @@ define @intrinsic_vrgather_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vrgather.vv v24, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret @@ -877,7 +877,7 @@ define @intrinsic_vrgather_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vrgather.vv v10, v8, v9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -925,7 +925,7 @@ define @intrinsic_vrgather_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vrgather.vv v12, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -973,7 +973,7 @@ define @intrinsic_vrgather_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vrgather.vv v16, v8, 
v12 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1021,7 +1021,7 @@ define @intrinsic_vrgather_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vrgather.vv v24, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret @@ -1070,7 +1070,7 @@ define @intrinsic_vrgather_vv_nxv1f16_nxv1f16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vrgather.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1118,7 +1118,7 @@ define @intrinsic_vrgather_vv_nxv2f16_nxv2f16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vrgather.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1166,7 +1166,7 @@ define @intrinsic_vrgather_vv_nxv4f16_nxv4f16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vrgather.vv v10, v8, v9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1214,7 +1214,7 @@ define @intrinsic_vrgather_vv_nxv8f16_nxv8f16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vrgather.vv v12, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1262,7 +1262,7 @@ define @intrinsic_vrgather_vv_nxv16f16_nxv16f16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vrgather.vv v16, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1310,7 +1310,7 @@ define @intrinsic_vrgather_vv_nxv32f16_nxv32f16_nxv32i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vrgather.vv v24, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret @@ -1359,7 +1359,7 @@ define @intrinsic_vrgather_vv_nxv1f32_nxv1f32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vrgather.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1407,7 +1407,7 @@ define @intrinsic_vrgather_vv_nxv2f32_nxv2f32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vrgather.vv v10, v8, v9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1455,7 +1455,7 @@ define @intrinsic_vrgather_vv_nxv4f32_nxv4f32_nxv4i32( %0, %1, 
i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vrgather.vv v12, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1503,7 +1503,7 @@ define @intrinsic_vrgather_vv_nxv8f32_nxv8f32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vrgather.vv v16, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1551,7 +1551,7 @@ define @intrinsic_vrgather_vv_nxv16f32_nxv16f32_nxv16i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vrgather.vv v24, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret @@ -1600,7 +1600,7 @@ define @intrinsic_vrgather_vv_nxv1f64_nxv1f64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vrgather.vv v10, v8, v9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1648,7 +1648,7 @@ define @intrinsic_vrgather_vv_nxv2f64_nxv2f64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vrgather.vv v12, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1696,7 +1696,7 @@ define @intrinsic_vrgather_vv_nxv4f64_nxv4f64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vrgather.vv v16, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1744,7 +1744,7 @@ define @intrinsic_vrgather_vv_nxv8f64_nxv8f64_nxv8i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vv_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vrgather.vv v24, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret @@ -1793,7 +1793,7 @@ define @intrinsic_vrgather_vx_nxv1i8_nxv1i8_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i8_nxv1i8_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vrgather.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1841,7 +1841,7 @@ define @intrinsic_vrgather_vx_nxv2i8_nxv2i8_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i8_nxv2i8_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vrgather.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1889,7 +1889,7 @@ define @intrinsic_vrgather_vx_nxv4i8_nxv4i8_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i8_nxv4i8_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: 
vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vrgather.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1937,7 +1937,7 @@ define @intrinsic_vrgather_vx_nxv8i8_nxv8i8_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i8_nxv8i8_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vrgather.vx v9, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -1985,7 +1985,7 @@ define @intrinsic_vrgather_vx_nxv16i8_nxv16i8_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i8_nxv16i8_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vrgather.vx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -2033,7 +2033,7 @@ define @intrinsic_vrgather_vx_nxv32i8_nxv32i8_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv32i8_nxv32i8_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vrgather.vx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -2081,7 +2081,7 @@ define @intrinsic_vrgather_vx_nxv64i8_nxv64i8_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv64i8_nxv64i8_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vrgather.vx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -2129,7 +2129,7 @@ define @intrinsic_vrgather_vx_nxv1i16_nxv1i16_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i16_nxv1i16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vrgather.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -2177,7 +2177,7 @@ define @intrinsic_vrgather_vx_nxv2i16_nxv2i16_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i16_nxv2i16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vrgather.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -2225,7 +2225,7 @@ define @intrinsic_vrgather_vx_nxv4i16_nxv4i16_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i16_nxv4i16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vrgather.vx v9, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -2273,7 +2273,7 @@ define @intrinsic_vrgather_vx_nxv8i16_nxv8i16_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i16_nxv8i16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vrgather.vx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -2321,7 +2321,7 @@ define @intrinsic_vrgather_vx_nxv16i16_nxv16i16_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i16_nxv16i16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vrgather.vx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -2369,7 +2369,7 @@ define @intrinsic_vrgather_vx_nxv32i16_nxv32i16_i64( %0, i64 %1, i64 
%2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv32i16_nxv32i16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vrgather.vx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -2417,7 +2417,7 @@ define @intrinsic_vrgather_vx_nxv1i32_nxv1i32_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i32_nxv1i32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vrgather.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -2465,7 +2465,7 @@ define @intrinsic_vrgather_vx_nxv2i32_nxv2i32_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i32_nxv2i32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vrgather.vx v9, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -2513,7 +2513,7 @@ define @intrinsic_vrgather_vx_nxv4i32_nxv4i32_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i32_nxv4i32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vrgather.vx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -2561,7 +2561,7 @@ define @intrinsic_vrgather_vx_nxv8i32_nxv8i32_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i32_nxv8i32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vrgather.vx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -2609,7 +2609,7 @@ define @intrinsic_vrgather_vx_nxv16i32_nxv16i32_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i32_nxv16i32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vrgather.vx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -2657,7 +2657,7 @@ define @intrinsic_vrgather_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vrgather.vx v9, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -2705,7 +2705,7 @@ define @intrinsic_vrgather_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vrgather.vx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -2753,7 +2753,7 @@ define @intrinsic_vrgather_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vrgather.vx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -2801,7 +2801,7 @@ define @intrinsic_vrgather_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, 
ma ; CHECK-NEXT: vrgather.vx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -2849,7 +2849,7 @@ define @intrinsic_vrgather_vx_nxv1f16_nxv1f16_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f16_nxv1f16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vrgather.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -2897,7 +2897,7 @@ define @intrinsic_vrgather_vx_nxv2f16_nxv2f16_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f16_nxv2f16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vrgather.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -2945,7 +2945,7 @@ define @intrinsic_vrgather_vx_nxv4f16_nxv4f16_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f16_nxv4f16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vrgather.vx v9, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -2993,7 +2993,7 @@ define @intrinsic_vrgather_vx_nxv8f16_nxv8f16_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f16_nxv8f16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vrgather.vx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -3041,7 +3041,7 @@ define @intrinsic_vrgather_vx_nxv16f16_nxv16f16_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv16f16_nxv16f16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vrgather.vx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -3089,7 +3089,7 @@ define @intrinsic_vrgather_vx_nxv32f16_nxv32f16_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv32f16_nxv32f16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vrgather.vx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -3137,7 +3137,7 @@ define @intrinsic_vrgather_vx_nxv1f32_nxv1f32_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f32_nxv1f32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vrgather.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -3185,7 +3185,7 @@ define @intrinsic_vrgather_vx_nxv2f32_nxv2f32_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f32_nxv2f32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vrgather.vx v9, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -3233,7 +3233,7 @@ define @intrinsic_vrgather_vx_nxv4f32_nxv4f32_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f32_nxv4f32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vrgather.vx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -3281,7 +3281,7 @@ define @intrinsic_vrgather_vx_nxv8f32_nxv8f32_i64( %0, i64 %1, i64 %2) nounwind { 
; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f32_nxv8f32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vrgather.vx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -3329,7 +3329,7 @@ define @intrinsic_vrgather_vx_nxv16f32_nxv16f32_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv16f32_nxv16f32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vrgather.vx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -3377,7 +3377,7 @@ define @intrinsic_vrgather_vx_nxv1f64_nxv1f64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f64_nxv1f64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vrgather.vx v9, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -3425,7 +3425,7 @@ define @intrinsic_vrgather_vx_nxv2f64_nxv2f64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f64_nxv2f64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vrgather.vx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -3473,7 +3473,7 @@ define @intrinsic_vrgather_vx_nxv4f64_nxv4f64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f64_nxv4f64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vrgather.vx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -3521,7 +3521,7 @@ define @intrinsic_vrgather_vx_nxv8f64_nxv8f64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f64_nxv8f64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vrgather.vx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -3563,7 +3563,7 @@ define @intrinsic_vrgather_vi_nxv1i8_nxv1i8_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i8_nxv1i8_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vrgather.vi v9, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -3597,7 +3597,7 @@ define @intrinsic_vrgather_vi_nxv2i8_nxv2i8_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i8_nxv2i8_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vrgather.vi v9, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -3631,7 +3631,7 @@ define @intrinsic_vrgather_vi_nxv4i8_nxv4i8_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i8_nxv4i8_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vrgather.vi v9, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -3665,7 +3665,7 @@ define @intrinsic_vrgather_vi_nxv8i8_nxv8i8_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i8_nxv8i8_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vrgather.vi v9, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v9 ; 
CHECK-NEXT: ret @@ -3699,7 +3699,7 @@ define @intrinsic_vrgather_vi_nxv16i8_nxv16i8_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i8_nxv16i8_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vrgather.vi v10, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -3733,7 +3733,7 @@ define @intrinsic_vrgather_vi_nxv32i8_nxv32i8_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv32i8_nxv32i8_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vrgather.vi v12, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -3767,7 +3767,7 @@ define @intrinsic_vrgather_vi_nxv64i8_nxv64i8_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv64i8_nxv64i8_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vrgather.vi v16, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -3801,7 +3801,7 @@ define @intrinsic_vrgather_vi_nxv1i16_nxv1i16_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i16_nxv1i16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vrgather.vi v9, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -3835,7 +3835,7 @@ define @intrinsic_vrgather_vi_nxv2i16_nxv2i16_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i16_nxv2i16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vrgather.vi v9, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -3869,7 +3869,7 @@ define @intrinsic_vrgather_vi_nxv4i16_nxv4i16_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i16_nxv4i16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vrgather.vi v9, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -3903,7 +3903,7 @@ define @intrinsic_vrgather_vi_nxv8i16_nxv8i16_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i16_nxv8i16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vrgather.vi v10, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -3937,7 +3937,7 @@ define @intrinsic_vrgather_vi_nxv16i16_nxv16i16_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i16_nxv16i16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vrgather.vi v12, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -3971,7 +3971,7 @@ define @intrinsic_vrgather_vi_nxv32i16_nxv32i16_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv32i16_nxv32i16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vrgather.vi v16, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -4005,7 +4005,7 @@ define @intrinsic_vrgather_vi_nxv1i32_nxv1i32_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i32_nxv1i32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, 
a0, e32, mf2, ta, ma ; CHECK-NEXT: vrgather.vi v9, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -4039,7 +4039,7 @@ define @intrinsic_vrgather_vi_nxv2i32_nxv2i32_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i32_nxv2i32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vrgather.vi v9, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -4073,7 +4073,7 @@ define @intrinsic_vrgather_vi_nxv4i32_nxv4i32_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i32_nxv4i32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vrgather.vi v10, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -4107,7 +4107,7 @@ define @intrinsic_vrgather_vi_nxv8i32_nxv8i32_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i32_nxv8i32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vrgather.vi v12, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -4141,7 +4141,7 @@ define @intrinsic_vrgather_vi_nxv16i32_nxv16i32_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i32_nxv16i32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vrgather.vi v16, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -4175,7 +4175,7 @@ define @intrinsic_vrgather_vi_nxv1i64_nxv1i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vrgather.vi v9, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -4209,7 +4209,7 @@ define @intrinsic_vrgather_vi_nxv2i64_nxv2i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vrgather.vi v10, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -4243,7 +4243,7 @@ define @intrinsic_vrgather_vi_nxv4i64_nxv4i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vrgather.vi v12, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -4277,7 +4277,7 @@ define @intrinsic_vrgather_vi_nxv8i64_nxv8i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vrgather.vi v16, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -4311,7 +4311,7 @@ define @intrinsic_vrgather_vi_nxv1f16_nxv1f16_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv1f16_nxv1f16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vrgather.vi v9, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -4345,7 +4345,7 @@ define @intrinsic_vrgather_vi_nxv2f16_nxv2f16_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv2f16_nxv2f16_i64: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vrgather.vi v9, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -4379,7 +4379,7 @@ define @intrinsic_vrgather_vi_nxv4f16_nxv4f16_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv4f16_nxv4f16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vrgather.vi v9, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -4413,7 +4413,7 @@ define @intrinsic_vrgather_vi_nxv8f16_nxv8f16_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv8f16_nxv8f16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vrgather.vi v10, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -4447,7 +4447,7 @@ define @intrinsic_vrgather_vi_nxv16f16_nxv16f16_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv16f16_nxv16f16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vrgather.vi v12, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -4481,7 +4481,7 @@ define @intrinsic_vrgather_vi_nxv32f16_nxv32f16_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv32f16_nxv32f16_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vrgather.vi v16, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -4515,7 +4515,7 @@ define @intrinsic_vrgather_vi_nxv1f32_nxv1f32_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv1f32_nxv1f32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vrgather.vi v9, v8, 9 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -4549,7 +4549,7 @@ define @intrinsic_vrgather_vi_nxv2f32_nxv2f32_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv2f32_nxv2f32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vrgather.vi v9, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -4583,7 +4583,7 @@ define @intrinsic_vrgather_vi_nxv4f32_nxv4f32_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv4f32_nxv4f32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vrgather.vi v10, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -4617,7 +4617,7 @@ define @intrinsic_vrgather_vi_nxv8f32_nxv8f32_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv8f32_nxv8f32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vrgather.vi v12, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -4651,7 +4651,7 @@ define @intrinsic_vrgather_vi_nxv16f32_nxv16f32_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv16f32_nxv16f32_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vrgather.vi v16, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -4685,7 +4685,7 @@ define @intrinsic_vrgather_vi_nxv1f64_nxv1f64_i64( %0, i64 
%1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv1f64_nxv1f64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vrgather.vi v9, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -4719,7 +4719,7 @@ define @intrinsic_vrgather_vi_nxv2f64_nxv2f64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv2f64_nxv2f64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vrgather.vi v10, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -4753,7 +4753,7 @@ define @intrinsic_vrgather_vi_nxv4f64_nxv4f64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv4f64_nxv4f64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vrgather.vi v12, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -4787,7 +4787,7 @@ define @intrinsic_vrgather_vi_nxv8f64_nxv8f64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vrgather_vi_nxv8f64_nxv8f64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vrgather.vi v16, v8, 9 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vrgatherei16.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -58,7 +58,7 @@ define @intrinsic_vrgatherei16_vv_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vrgatherei16.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -106,7 +106,7 @@ define @intrinsic_vrgatherei16_vv_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vrgatherei16.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -154,7 +154,7 @@ define @intrinsic_vrgatherei16_vv_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vrgatherei16.vv v9, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -202,7 +202,7 @@ define @intrinsic_vrgatherei16_vv_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vrgatherei16.vv v10, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -250,7 +250,7 @@ define @intrinsic_vrgatherei16_vv_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vrgatherei16_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vrgatherei16.vv v12, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -298,7 +298,7 @@ define @intrinsic_vrgatherei16_vv_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vrgatherei16.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -346,7 +346,7 @@ define @intrinsic_vrgatherei16_vv_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vrgatherei16.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -394,7 +394,7 @@ define @intrinsic_vrgatherei16_vv_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vrgatherei16.vv v10, v8, v9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -442,7 +442,7 @@ define @intrinsic_vrgatherei16_vv_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vrgatherei16.vv v12, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -490,7 +490,7 @@ define @intrinsic_vrgatherei16_vv_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vrgatherei16.vv v16, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -538,7 +538,7 @@ define @intrinsic_vrgatherei16_vv_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret @@ -587,7 +587,7 @@ define @intrinsic_vrgatherei16_vv_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vrgatherei16.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -635,7 +635,7 @@ define @intrinsic_vrgatherei16_vv_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vrgatherei16.vv v12, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -683,7 +683,7 @@ define @intrinsic_vrgatherei16_vv_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vrgatherei16.vv 
v16, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -731,7 +731,7 @@ define @intrinsic_vrgatherei16_vv_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret @@ -780,7 +780,7 @@ define @intrinsic_vrgatherei16_vv_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vrgatherei16.vv v16, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -828,7 +828,7 @@ define @intrinsic_vrgatherei16_vv_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret @@ -877,7 +877,7 @@ define @intrinsic_vrgatherei16_vv_nxv1f16_nxv1f16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vrgatherei16.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -925,7 +925,7 @@ define @intrinsic_vrgatherei16_vv_nxv2f16_nxv2f16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vrgatherei16.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -973,7 +973,7 @@ define @intrinsic_vrgatherei16_vv_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vrgatherei16.vv v10, v8, v9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1021,7 +1021,7 @@ define @intrinsic_vrgatherei16_vv_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vrgatherei16.vv v12, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1069,7 +1069,7 @@ define @intrinsic_vrgatherei16_vv_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vrgatherei16.vv v16, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1117,7 +1117,7 @@ define @intrinsic_vrgatherei16_vv_nxv32f16_nxv32f16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret @@ -1166,7 +1166,7 @@ define @intrinsic_vrgatherei16_vv_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vrgatherei16_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vrgatherei16.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1214,7 +1214,7 @@ define @intrinsic_vrgatherei16_vv_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vrgatherei16.vv v12, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1262,7 +1262,7 @@ define @intrinsic_vrgatherei16_vv_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vrgatherei16.vv v16, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1310,7 +1310,7 @@ define @intrinsic_vrgatherei16_vv_nxv16f32_nxv16f32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret @@ -1359,7 +1359,7 @@ define @intrinsic_vrgatherei16_vv_nxv4f64_nxv4f64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vrgatherei16.vv v16, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1407,7 +1407,7 @@ define @intrinsic_vrgatherei16_vv_nxv8f64_nxv8f64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vrgatherei16.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -58,7 +58,7 @@ define @intrinsic_vrgatherei16_vv_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vrgatherei16.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -106,7 +106,7 @@ define @intrinsic_vrgatherei16_vv_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vrgatherei16.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -154,7 +154,7 @@ define @intrinsic_vrgatherei16_vv_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; 
CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vrgatherei16.vv v9, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -202,7 +202,7 @@ define @intrinsic_vrgatherei16_vv_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vrgatherei16.vv v10, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -250,7 +250,7 @@ define @intrinsic_vrgatherei16_vv_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vrgatherei16.vv v12, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -298,7 +298,7 @@ define @intrinsic_vrgatherei16_vv_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vrgatherei16.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -346,7 +346,7 @@ define @intrinsic_vrgatherei16_vv_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vrgatherei16.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -394,7 +394,7 @@ define @intrinsic_vrgatherei16_vv_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vrgatherei16.vv v10, v8, v9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -442,7 +442,7 @@ define @intrinsic_vrgatherei16_vv_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vrgatherei16.vv v12, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -490,7 +490,7 @@ define @intrinsic_vrgatherei16_vv_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vrgatherei16.vv v16, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -538,7 +538,7 @@ define @intrinsic_vrgatherei16_vv_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret @@ -587,7 +587,7 @@ define @intrinsic_vrgatherei16_vv_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: 
vrgatherei16.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -635,7 +635,7 @@ define @intrinsic_vrgatherei16_vv_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vrgatherei16.vv v12, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -683,7 +683,7 @@ define @intrinsic_vrgatherei16_vv_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vrgatherei16.vv v16, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -731,7 +731,7 @@ define @intrinsic_vrgatherei16_vv_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret @@ -780,7 +780,7 @@ define @intrinsic_vrgatherei16_vv_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vrgatherei16.vv v16, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -828,7 +828,7 @@ define @intrinsic_vrgatherei16_vv_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret @@ -877,7 +877,7 @@ define @intrinsic_vrgatherei16_vv_nxv1f16_nxv1f16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vrgatherei16.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -925,7 +925,7 @@ define @intrinsic_vrgatherei16_vv_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vrgatherei16.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -973,7 +973,7 @@ define @intrinsic_vrgatherei16_vv_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vrgatherei16.vv v10, v8, v9 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1021,7 +1021,7 @@ define @intrinsic_vrgatherei16_vv_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vrgatherei16.vv v12, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1069,7 +1069,7 @@ define @intrinsic_vrgatherei16_vv_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vrgatherei16_vv_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vrgatherei16.vv v16, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1117,7 +1117,7 @@ define @intrinsic_vrgatherei16_vv_nxv32f16_nxv32f16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret @@ -1166,7 +1166,7 @@ define @intrinsic_vrgatherei16_vv_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vrgatherei16.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1214,7 +1214,7 @@ define @intrinsic_vrgatherei16_vv_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vrgatherei16.vv v12, v8, v10 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1262,7 +1262,7 @@ define @intrinsic_vrgatherei16_vv_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vrgatherei16.vv v16, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1310,7 +1310,7 @@ define @intrinsic_vrgatherei16_vv_nxv16f32_nxv16f32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret @@ -1359,7 +1359,7 @@ define @intrinsic_vrgatherei16_vv_nxv4f64_nxv4f64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vrgatherei16.vv v16, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -1407,7 +1407,7 @@ define @intrinsic_vrgatherei16_vv_nxv8f64_nxv8f64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 ; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll @@ -13,40 +13,40 @@ ; NOSUBREG-LABEL: foo: ; NOSUBREG: # %bb.0: # %loopIR.preheader.i.i ; NOSUBREG-NEXT: # kill: def $v10 killed $v10 def $v10m2 -; NOSUBREG-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; NOSUBREG-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; NOSUBREG-NEXT: vmv.v.i v14, 0 -; NOSUBREG-NEXT: vsetvli zero, zero, e8, m1, ta, mu +; NOSUBREG-NEXT: vsetvli zero, zero, e8, m1, ta, ma ; 
NOSUBREG-NEXT: vmv.v.i v9, 0 -; NOSUBREG-NEXT: vsetivli zero, 4, e8, m1, tu, mu +; NOSUBREG-NEXT: vsetivli zero, 4, e8, m1, tu, ma ; NOSUBREG-NEXT: vmv1r.v v8, v9 ; NOSUBREG-NEXT: vrgatherei16.vv v8, v9, v14 ; NOSUBREG-NEXT: .LBB0_1: # %loopIR3.i.i ; NOSUBREG-NEXT: # =>This Inner Loop Header: Depth=1 ; NOSUBREG-NEXT: vl1r.v v9, (zero) -; NOSUBREG-NEXT: vsetivli zero, 4, e8, m1, tu, mu +; NOSUBREG-NEXT: vsetivli zero, 4, e8, m1, tu, ma ; NOSUBREG-NEXT: vmv1r.v v13, v12 ; NOSUBREG-NEXT: vrgatherei16.vv v13, v9, v10 -; NOSUBREG-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; NOSUBREG-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; NOSUBREG-NEXT: vand.vv v9, v8, v13 ; NOSUBREG-NEXT: vs1r.v v9, (zero) ; NOSUBREG-NEXT: j .LBB0_1 ; ; SUBREG-LABEL: foo: ; SUBREG: # %bb.0: # %loopIR.preheader.i.i -; SUBREG-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; SUBREG-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; SUBREG-NEXT: vmv.v.i v14, 0 -; SUBREG-NEXT: vsetvli zero, zero, e8, m1, ta, mu +; SUBREG-NEXT: vsetvli zero, zero, e8, m1, ta, ma ; SUBREG-NEXT: vmv.v.i v9, 0 -; SUBREG-NEXT: vsetivli zero, 4, e8, m1, tu, mu +; SUBREG-NEXT: vsetivli zero, 4, e8, m1, tu, ma ; SUBREG-NEXT: vmv1r.v v8, v9 ; SUBREG-NEXT: vrgatherei16.vv v8, v9, v14 ; SUBREG-NEXT: .LBB0_1: # %loopIR3.i.i ; SUBREG-NEXT: # =>This Inner Loop Header: Depth=1 ; SUBREG-NEXT: vl1r.v v9, (zero) -; SUBREG-NEXT: vsetivli zero, 4, e8, m1, tu, mu +; SUBREG-NEXT: vsetivli zero, 4, e8, m1, tu, ma ; SUBREG-NEXT: vmv1r.v v11, v12 ; SUBREG-NEXT: vrgatherei16.vv v11, v9, v10 -; SUBREG-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; SUBREG-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; SUBREG-NEXT: vand.vv v9, v8, v11 ; SUBREG-NEXT: vs1r.v v9, (zero) ; SUBREG-NEXT: j .LBB0_1 diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrsub-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-sdnode.ll @@ -5,7 +5,7 @@ define @vrsub_vx_nxv1i8( %va, i8 signext %b) { ; CHECK-LABEL: vrsub_vx_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -17,7 +17,7 @@ define @vrsub_vi_nxv1i8_0( %va) { ; CHECK-LABEL: vrsub_vi_nxv1i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, -4 ; CHECK-NEXT: ret %head = insertelement poison, i8 -4, i32 0 @@ -29,7 +29,7 @@ define @vrsub_vx_nxv2i8( %va, i8 signext %b) { ; CHECK-LABEL: vrsub_vx_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -41,7 +41,7 @@ define @vrsub_vi_nxv2i8_0( %va) { ; CHECK-LABEL: vrsub_vi_nxv2i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, -4 ; CHECK-NEXT: ret %head = insertelement poison, i8 -4, i32 0 @@ -53,7 +53,7 @@ define @vrsub_vx_nxv4i8( %va, i8 signext %b) { ; CHECK-LABEL: vrsub_vx_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -65,7 +65,7 @@ define @vrsub_vi_nxv4i8_0( %va) { ; CHECK-LABEL: vrsub_vi_nxv4i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli 
a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, -4 ; CHECK-NEXT: ret %head = insertelement poison, i8 -4, i32 0 @@ -77,7 +77,7 @@ define @vrsub_vx_nxv8i8( %va, i8 signext %b) { ; CHECK-LABEL: vrsub_vx_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -89,7 +89,7 @@ define @vrsub_vi_nxv8i8_0( %va) { ; CHECK-LABEL: vrsub_vi_nxv8i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, -4 ; CHECK-NEXT: ret %head = insertelement poison, i8 -4, i32 0 @@ -101,7 +101,7 @@ define @vrsub_vx_nxv16i8( %va, i8 signext %b) { ; CHECK-LABEL: vrsub_vx_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -113,7 +113,7 @@ define @vrsub_vi_nxv16i8_0( %va) { ; CHECK-LABEL: vrsub_vi_nxv16i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, -4 ; CHECK-NEXT: ret %head = insertelement poison, i8 -4, i32 0 @@ -125,7 +125,7 @@ define @vrsub_vx_nxv32i8( %va, i8 signext %b) { ; CHECK-LABEL: vrsub_vx_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -137,7 +137,7 @@ define @vrsub_vi_nxv32i8_0( %va) { ; CHECK-LABEL: vrsub_vi_nxv32i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, -4 ; CHECK-NEXT: ret %head = insertelement poison, i8 -4, i32 0 @@ -149,7 +149,7 @@ define @vrsub_vx_nxv64i8( %va, i8 signext %b) { ; CHECK-LABEL: vrsub_vx_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -161,7 +161,7 @@ define @vrsub_vi_nxv64i8_0( %va) { ; CHECK-LABEL: vrsub_vi_nxv64i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, -4 ; CHECK-NEXT: ret %head = insertelement poison, i8 -4, i32 0 @@ -173,7 +173,7 @@ define @vrsub_vx_nxv1i16( %va, i16 signext %b) { ; CHECK-LABEL: vrsub_vx_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -185,7 +185,7 @@ define @vrsub_vi_nxv1i16_0( %va) { ; CHECK-LABEL: vrsub_vi_nxv1i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, -4 ; CHECK-NEXT: ret %head = insertelement poison, i16 -4, i32 0 @@ -197,7 +197,7 @@ define @vrsub_vx_nxv2i16( %va, i16 signext %b) { ; CHECK-LABEL: vrsub_vx_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -209,7 +209,7 @@ 
define @vrsub_vi_nxv2i16_0( %va) { ; CHECK-LABEL: vrsub_vi_nxv2i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, -4 ; CHECK-NEXT: ret %head = insertelement poison, i16 -4, i32 0 @@ -221,7 +221,7 @@ define @vrsub_vx_nxv4i16( %va, i16 signext %b) { ; CHECK-LABEL: vrsub_vx_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -233,7 +233,7 @@ define @vrsub_vi_nxv4i16_0( %va) { ; CHECK-LABEL: vrsub_vi_nxv4i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, -4 ; CHECK-NEXT: ret %head = insertelement poison, i16 -4, i32 0 @@ -245,7 +245,7 @@ define @vrsub_vx_nxv8i16( %va, i16 signext %b) { ; CHECK-LABEL: vrsub_vx_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -257,7 +257,7 @@ define @vrsub_vi_nxv8i16_0( %va) { ; CHECK-LABEL: vrsub_vi_nxv8i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, -4 ; CHECK-NEXT: ret %head = insertelement poison, i16 -4, i32 0 @@ -269,7 +269,7 @@ define @vrsub_vx_nxv16i16( %va, i16 signext %b) { ; CHECK-LABEL: vrsub_vx_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -281,7 +281,7 @@ define @vrsub_vi_nxv16i16_0( %va) { ; CHECK-LABEL: vrsub_vi_nxv16i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, -4 ; CHECK-NEXT: ret %head = insertelement poison, i16 -4, i32 0 @@ -293,7 +293,7 @@ define @vrsub_vx_nxv32i16( %va, i16 signext %b) { ; CHECK-LABEL: vrsub_vx_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -305,7 +305,7 @@ define @vrsub_vi_nxv32i16_0( %va) { ; CHECK-LABEL: vrsub_vi_nxv32i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, -4 ; CHECK-NEXT: ret %head = insertelement poison, i16 -4, i32 0 @@ -317,7 +317,7 @@ define @vrsub_vx_nxv1i32( %va, i32 signext %b) { ; CHECK-LABEL: vrsub_vx_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -329,7 +329,7 @@ define @vrsub_vi_nxv1i32_0( %va) { ; CHECK-LABEL: vrsub_vi_nxv1i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, -4 ; CHECK-NEXT: ret %head = insertelement poison, i32 -4, i32 0 @@ -341,7 +341,7 @@ define @vrsub_vx_nxv2i32( %va, i32 signext %b) { ; CHECK-LABEL: vrsub_vx_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; 
CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -353,7 +353,7 @@ define @vrsub_vi_nxv2i32_0( %va) { ; CHECK-LABEL: vrsub_vi_nxv2i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, -4 ; CHECK-NEXT: ret %head = insertelement poison, i32 -4, i32 0 @@ -365,7 +365,7 @@ define @vrsub_vx_nxv4i32( %va, i32 signext %b) { ; CHECK-LABEL: vrsub_vx_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -377,7 +377,7 @@ define @vrsub_vi_nxv4i32_0( %va) { ; CHECK-LABEL: vrsub_vi_nxv4i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, -4 ; CHECK-NEXT: ret %head = insertelement poison, i32 -4, i32 0 @@ -389,7 +389,7 @@ define @vrsub_vx_nxv8i32( %va, i32 signext %b) { ; CHECK-LABEL: vrsub_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -401,7 +401,7 @@ define @vrsub_vi_nxv8i32_0( %va) { ; CHECK-LABEL: vrsub_vi_nxv8i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, -4 ; CHECK-NEXT: ret %head = insertelement poison, i32 -4, i32 0 @@ -413,7 +413,7 @@ define @vrsub_vx_nxv16i32( %va, i32 signext %b) { ; CHECK-LABEL: vrsub_vx_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -425,7 +425,7 @@ define @vrsub_vi_nxv16i32_0( %va) { ; CHECK-LABEL: vrsub_vi_nxv16i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, -4 ; CHECK-NEXT: ret %head = insertelement poison, i32 -4, i32 0 @@ -442,7 +442,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsub.vv v8, v9, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -450,7 +450,7 @@ ; ; RV64-LABEL: vrsub_vx_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV64-NEXT: vrsub.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -462,7 +462,7 @@ define @vrsub_vi_nxv1i64_0( %va) { ; CHECK-LABEL: vrsub_vi_nxv1i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, -4 ; CHECK-NEXT: ret %head = insertelement poison, i64 -4, i32 0 @@ -479,7 +479,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsub.vv v8, v10, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -487,7 +487,7 @@ ; ; RV64-LABEL: vrsub_vx_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, 
e64, m2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV64-NEXT: vrsub.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -499,7 +499,7 @@ define @vrsub_vi_nxv2i64_0( %va) { ; CHECK-LABEL: vrsub_vi_nxv2i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, -4 ; CHECK-NEXT: ret %head = insertelement poison, i64 -4, i32 0 @@ -516,7 +516,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsub.vv v8, v12, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -524,7 +524,7 @@ ; ; RV64-LABEL: vrsub_vx_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV64-NEXT: vrsub.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -536,7 +536,7 @@ define @vrsub_vi_nxv4i64_0( %va) { ; CHECK-LABEL: vrsub_vi_nxv4i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, -4 ; CHECK-NEXT: ret %head = insertelement poison, i64 -4, i32 0 @@ -553,7 +553,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsub.vv v8, v16, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -561,7 +561,7 @@ ; ; RV64-LABEL: vrsub_vx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vrsub.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -573,7 +573,7 @@ define @vrsub_vi_nxv8i64_0( %va) { ; CHECK-LABEL: vrsub_vi_nxv8i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, -4 ; CHECK-NEXT: ret %head = insertelement poison, i64 -4, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll @@ -21,7 +21,7 @@ define @vrsub_vx_nxv1i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv1i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -47,7 +47,7 @@ define @vrsub_vi_nxv1i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_nxv1i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 2, i32 0 @@ -75,7 +75,7 @@ define @vrsub_vx_nxv2i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -101,7 +101,7 @@ define @vrsub_vi_nxv2i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, 
mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 2, i32 0 @@ -129,7 +129,7 @@ define @vrsub_vx_nxv4i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -155,7 +155,7 @@ define @vrsub_vi_nxv4i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_nxv4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 2, i32 0 @@ -183,7 +183,7 @@ define @vrsub_vx_nxv8i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -209,7 +209,7 @@ define @vrsub_vi_nxv8i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_nxv8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 2, i32 0 @@ -237,7 +237,7 @@ define @vrsub_vx_nxv16i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -263,7 +263,7 @@ define @vrsub_vi_nxv16i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_nxv16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 2, i32 0 @@ -291,7 +291,7 @@ define @vrsub_vx_nxv32i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv32i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -317,7 +317,7 @@ define @vrsub_vi_nxv32i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_nxv32i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 2, i32 0 @@ -345,7 +345,7 @@ define @vrsub_vx_nxv64i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv64i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -371,7 +371,7 @@ define @vrsub_vi_nxv64i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_nxv64i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 2, i32 0 @@ -399,7 +399,7 @@ 
define @vrsub_vx_nxv1i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv1i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -425,7 +425,7 @@ define @vrsub_vi_nxv1i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_nxv1i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 2, i32 0 @@ -453,7 +453,7 @@ define @vrsub_vx_nxv2i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -479,7 +479,7 @@ define @vrsub_vi_nxv2i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 2, i32 0 @@ -507,7 +507,7 @@ define @vrsub_vx_nxv4i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -533,7 +533,7 @@ define @vrsub_vi_nxv4i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_nxv4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 2, i32 0 @@ -561,7 +561,7 @@ define @vrsub_vx_nxv8i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -587,7 +587,7 @@ define @vrsub_vi_nxv8i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_nxv8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 2, i32 0 @@ -615,7 +615,7 @@ define @vrsub_vx_nxv16i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -641,7 +641,7 @@ define @vrsub_vi_nxv16i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_nxv16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 2, i32 0 @@ -669,7 +669,7 @@ define @vrsub_vx_nxv32i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv32i16_unmasked: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -695,7 +695,7 @@ define @vrsub_vi_nxv32i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_nxv32i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 2, i32 0 @@ -723,7 +723,7 @@ define @vrsub_vx_nxv1i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv1i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -749,7 +749,7 @@ define @vrsub_vi_nxv1i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_nxv1i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 2, i32 0 @@ -777,7 +777,7 @@ define @vrsub_vx_nxv2i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -803,7 +803,7 @@ define @vrsub_vi_nxv2i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 2, i32 0 @@ -831,7 +831,7 @@ define @vrsub_vx_nxv4i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -857,7 +857,7 @@ define @vrsub_vi_nxv4i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_nxv4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 2, i32 0 @@ -885,7 +885,7 @@ define @vrsub_vx_nxv8i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -911,7 +911,7 @@ define @vrsub_vi_nxv8i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_nxv8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 2, i32 0 @@ -939,7 +939,7 @@ define @vrsub_vx_nxv16i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vx_nxv16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; 
CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -965,7 +965,7 @@ define @vrsub_vi_nxv16i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_nxv16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 2, i32 0 @@ -986,7 +986,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vsub.vv v8, v9, v8, v0.t @@ -1012,16 +1012,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vsub.vv v8, v9, v8 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vrsub_vx_nxv1i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vrsub.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1047,7 +1047,7 @@ define @vrsub_vi_nxv1i64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_nxv1i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 2, i32 0 @@ -1068,7 +1068,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vsub.vv v8, v10, v8, v0.t @@ -1094,16 +1094,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vsub.vv v8, v10, v8 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vrsub_vx_nxv2i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vrsub.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1129,7 +1129,7 @@ define @vrsub_vi_nxv2i64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 2, i32 0 @@ -1150,7 +1150,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vsub.vv v8, v12, v8, v0.t @@ -1176,16 +1176,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli 
a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vsub.vv v8, v12, v8 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vrsub_vx_nxv4i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vrsub.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1211,7 +1211,7 @@ define @vrsub_vi_nxv4i64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_nxv4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 2, i32 0 @@ -1232,7 +1232,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vsub.vv v8, v16, v8, v0.t @@ -1258,16 +1258,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vsub.vv v8, v16, v8 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vrsub_vx_nxv8i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vrsub.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1293,7 +1293,7 @@ define @vrsub_vi_nxv8i64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vrsub_vi_nxv8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 2 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 2, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrsub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrsub.ll @@ -12,7 +12,7 @@ define @intrinsic_vrsub_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define @intrinsic_vrsub_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vrsub_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -150,7 +150,7 @@ define @intrinsic_vrsub_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu 
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -196,7 +196,7 @@ define @intrinsic_vrsub_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -242,7 +242,7 @@ define @intrinsic_vrsub_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -288,7 +288,7 @@ define @intrinsic_vrsub_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -334,7 +334,7 @@ define @intrinsic_vrsub_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -380,7 +380,7 @@ define @intrinsic_vrsub_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -426,7 +426,7 @@ define @intrinsic_vrsub_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -472,7 +472,7 @@ define @intrinsic_vrsub_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -518,7 +518,7 @@ define @intrinsic_vrsub_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -564,7 +564,7 @@ define @intrinsic_vrsub_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -610,7 +610,7 @@ define @intrinsic_vrsub_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -656,7 +656,7 @@ define 
@intrinsic_vrsub_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -702,7 +702,7 @@ define @intrinsic_vrsub_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -748,7 +748,7 @@ define @intrinsic_vrsub_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -794,7 +794,7 @@ define @intrinsic_vrsub_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vrsub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -844,7 +844,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsub.vv v8, v9, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -852,7 +852,7 @@ ; ; RV64-LABEL: intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vrsub.vx v8, v8, a0 ; RV64-NEXT: ret entry: @@ -914,7 +914,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsub.vv v8, v10, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -922,7 +922,7 @@ ; ; RV64-LABEL: intrinsic_vrsub_vx_nxv2i64_nxv2i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vrsub.vx v8, v8, a0 ; RV64-NEXT: ret entry: @@ -984,7 +984,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsub.vv v8, v12, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -992,7 +992,7 @@ ; ; RV64-LABEL: intrinsic_vrsub_vx_nxv4i64_nxv4i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vrsub.vx v8, v8, a0 ; RV64-NEXT: ret entry: @@ -1054,7 +1054,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsub.vv v8, v16, v8 ; RV32-NEXT: addi sp, sp, 16 @@ -1062,7 +1062,7 @@ ; ; RV64-LABEL: intrinsic_vrsub_vx_nxv8i64_nxv8i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vrsub.vx v8, v8, a0 ; RV64-NEXT: ret entry: @@ 
-1114,7 +1114,7 @@ define @intrinsic_vrsub_vi_nxv1i8_nxv1i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1147,7 +1147,7 @@ define @intrinsic_vrsub_vi_nxv2i8_nxv2i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1180,7 +1180,7 @@ define @intrinsic_vrsub_vi_nxv4i8_nxv4i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1213,7 +1213,7 @@ define @intrinsic_vrsub_vi_nxv8i8_nxv8i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1246,7 +1246,7 @@ define @intrinsic_vrsub_vi_nxv16i8_nxv16i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1279,7 +1279,7 @@ define @intrinsic_vrsub_vi_nxv32i8_nxv32i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1312,7 +1312,7 @@ define @intrinsic_vrsub_vi_nxv64i8_nxv64i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1345,7 +1345,7 @@ define @intrinsic_vrsub_vi_nxv1i16_nxv1i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1378,7 +1378,7 @@ define @intrinsic_vrsub_vi_nxv2i16_nxv2i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1411,7 +1411,7 @@ define @intrinsic_vrsub_vi_nxv4i16_nxv4i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1444,7 +1444,7 @@ define @intrinsic_vrsub_vi_nxv8i16_nxv8i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, 
e16, m2, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1477,7 +1477,7 @@ define @intrinsic_vrsub_vi_nxv16i16_nxv16i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1510,7 +1510,7 @@ define @intrinsic_vrsub_vi_nxv32i16_nxv32i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1543,7 +1543,7 @@ define @intrinsic_vrsub_vi_nxv1i32_nxv1i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1576,7 +1576,7 @@ define @intrinsic_vrsub_vi_nxv2i32_nxv2i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1609,7 +1609,7 @@ define @intrinsic_vrsub_vi_nxv4i32_nxv4i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1642,7 +1642,7 @@ define @intrinsic_vrsub_vi_nxv8i32_nxv8i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1675,7 +1675,7 @@ define @intrinsic_vrsub_vi_nxv16i32_nxv16i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1708,7 +1708,7 @@ define @intrinsic_vrsub_vi_nxv1i64_nxv1i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1741,7 +1741,7 @@ define @intrinsic_vrsub_vi_nxv2i64_nxv2i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1774,7 +1774,7 @@ define @intrinsic_vrsub_vi_nxv4i64_nxv4i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vrsub_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1807,7 +1807,7 @@ define @intrinsic_vrsub_vi_nxv8i64_nxv8i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: 
intrinsic_vrsub_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vrsub.vi v8, v8, 9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vsadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vsadd_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vsadd_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vsadd_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vsadd_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vsadd_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vsadd_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vsadd_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vsadd_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vsadd_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) 
nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vsadd_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vsadd_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vsadd_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vsadd_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vsadd_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vsadd_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vsadd_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vsadd_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vsadd_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vsadd_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vsadd_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vsadd_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vsadd_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1095,7 +1095,7 @@ define @intrinsic_vsadd_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vsadd_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vsadd_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vsadd_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define @intrinsic_vsadd_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vsadd_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vsadd_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define 
@intrinsic_vsadd_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vsadd_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define @intrinsic_vsadd_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vsadd_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vsadd_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vsadd_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vsadd_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ define @intrinsic_vsadd_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ define @intrinsic_vsadd_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vsadd_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1898,7 +1898,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; 
CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1957,7 +1957,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vsadd.vv v8, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -2016,7 +2016,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vsadd.vv v8, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -2075,7 +2075,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vsadd.vv v8, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 @@ -2124,7 +2124,7 @@ define @intrinsic_vsadd_vi_nxv1i8_nxv1i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2157,7 +2157,7 @@ define @intrinsic_vsadd_vi_nxv2i8_nxv2i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2190,7 +2190,7 @@ define @intrinsic_vsadd_vi_nxv4i8_nxv4i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2223,7 +2223,7 @@ define @intrinsic_vsadd_vi_nxv8i8_nxv8i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2256,7 +2256,7 @@ define @intrinsic_vsadd_vi_nxv16i8_nxv16i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2289,7 +2289,7 @@ define @intrinsic_vsadd_vi_nxv32i8_nxv32i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2322,7 +2322,7 @@ define @intrinsic_vsadd_vi_nxv64i8_nxv64i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2355,7 +2355,7 @@ define @intrinsic_vsadd_vi_nxv1i16_nxv1i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2388,7 +2388,7 @@ define @intrinsic_vsadd_vi_nxv2i16_nxv2i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2421,7 +2421,7 @@ define @intrinsic_vsadd_vi_nxv4i16_nxv4i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2454,7 +2454,7 @@ define @intrinsic_vsadd_vi_nxv8i16_nxv8i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2487,7 +2487,7 @@ define @intrinsic_vsadd_vi_nxv16i16_nxv16i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2520,7 +2520,7 @@ define @intrinsic_vsadd_vi_nxv32i16_nxv32i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2553,7 +2553,7 @@ define @intrinsic_vsadd_vi_nxv1i32_nxv1i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2586,7 +2586,7 @@ define @intrinsic_vsadd_vi_nxv2i32_nxv2i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2619,7 +2619,7 @@ define @intrinsic_vsadd_vi_nxv4i32_nxv4i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2652,7 +2652,7 @@ define @intrinsic_vsadd_vi_nxv8i32_nxv8i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2685,7 +2685,7 @@ define @intrinsic_vsadd_vi_nxv16i32_nxv16i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2718,7 +2718,7 @@ define @intrinsic_vsadd_vi_nxv1i64_nxv1i64_i64( %0, i32 %1) nounwind { ; 
CHECK-LABEL: intrinsic_vsadd_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2751,7 +2751,7 @@ define @intrinsic_vsadd_vi_nxv2i64_nxv2i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2784,7 +2784,7 @@ define @intrinsic_vsadd_vi_nxv4i64_nxv4i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2817,7 +2817,7 @@ define @intrinsic_vsadd_vi_nxv8i64_nxv8i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vsadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vsadd_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vsadd_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vsadd_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vsadd_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vsadd_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vsadd_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { ; 
CHECK-LABEL: intrinsic_vsadd_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vsadd_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vsadd_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vsadd_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vsadd_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vsadd_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vsadd_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vsadd_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vsadd_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vsadd_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vsadd_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vsadd_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vsadd_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vsadd_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vsadd_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vsadd_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vsadd_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1095,7 +1095,7 @@ define @intrinsic_vsadd_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vsadd_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vsadd_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vsadd_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define 
@intrinsic_vsadd_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vsadd_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vsadd_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vsadd_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vsadd_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define @intrinsic_vsadd_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vsadd_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vsadd_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vsadd_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vsadd_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ define @intrinsic_vsadd_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ define @intrinsic_vsadd_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vsadd_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1894,7 +1894,7 @@ define @intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1941,7 +1941,7 @@ define @intrinsic_vsadd_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1988,7 +1988,7 @@ define @intrinsic_vsadd_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2035,7 +2035,7 @@ define @intrinsic_vsadd_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2076,7 +2076,7 @@ define @intrinsic_vsadd_vi_nxv1i8_nxv1i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2109,7 +2109,7 @@ define @intrinsic_vsadd_vi_nxv2i8_nxv2i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2142,7 +2142,7 @@ define @intrinsic_vsadd_vi_nxv4i8_nxv4i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2175,7 +2175,7 @@ define @intrinsic_vsadd_vi_nxv8i8_nxv8i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2208,7 +2208,7 @@ define 
@intrinsic_vsadd_vi_nxv16i8_nxv16i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2241,7 +2241,7 @@ define @intrinsic_vsadd_vi_nxv32i8_nxv32i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2274,7 +2274,7 @@ define @intrinsic_vsadd_vi_nxv64i8_nxv64i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2307,7 +2307,7 @@ define @intrinsic_vsadd_vi_nxv1i16_nxv1i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2340,7 +2340,7 @@ define @intrinsic_vsadd_vi_nxv2i16_nxv2i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2373,7 +2373,7 @@ define @intrinsic_vsadd_vi_nxv4i16_nxv4i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2406,7 +2406,7 @@ define @intrinsic_vsadd_vi_nxv8i16_nxv8i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2439,7 +2439,7 @@ define @intrinsic_vsadd_vi_nxv16i16_nxv16i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2472,7 +2472,7 @@ define @intrinsic_vsadd_vi_nxv32i16_nxv32i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2505,7 +2505,7 @@ define @intrinsic_vsadd_vi_nxv1i32_nxv1i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2538,7 +2538,7 @@ define @intrinsic_vsadd_vi_nxv2i32_nxv2i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, 
ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2571,7 +2571,7 @@ define @intrinsic_vsadd_vi_nxv4i32_nxv4i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2604,7 +2604,7 @@ define @intrinsic_vsadd_vi_nxv8i32_nxv8i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2637,7 +2637,7 @@ define @intrinsic_vsadd_vi_nxv16i32_nxv16i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2670,7 +2670,7 @@ define @intrinsic_vsadd_vi_nxv1i64_nxv1i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2703,7 +2703,7 @@ define @intrinsic_vsadd_vi_nxv2i64_nxv2i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2736,7 +2736,7 @@ define @intrinsic_vsadd_vi_nxv4i64_nxv4i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2769,7 +2769,7 @@ define @intrinsic_vsadd_vi_nxv8i64_nxv8i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsadd_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsadd-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-sdnode.ll @@ -9,7 +9,7 @@ define @sadd_nxv1i8_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv1i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv1i8( %va, %b) @@ -19,7 +19,7 @@ define @sadd_nxv1i8_vx( %va, i8 %b) { ; CHECK-LABEL: sadd_nxv1i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -31,7 +31,7 @@ define @sadd_nxv1i8_vi( %va) { ; CHECK-LABEL: sadd_nxv1i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 5, i32 0 @@ -45,7 +45,7 @@ define @sadd_nxv2i8_vv( %va, %b) { ; CHECK-LABEL: 
sadd_nxv2i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv2i8( %va, %b) @@ -55,7 +55,7 @@ define @sadd_nxv2i8_vx( %va, i8 %b) { ; CHECK-LABEL: sadd_nxv2i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -67,7 +67,7 @@ define @sadd_nxv2i8_vi( %va) { ; CHECK-LABEL: sadd_nxv2i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 5, i32 0 @@ -81,7 +81,7 @@ define @sadd_nxv4i8_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv4i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv4i8( %va, %b) @@ -91,7 +91,7 @@ define @sadd_nxv4i8_vx( %va, i8 %b) { ; CHECK-LABEL: sadd_nxv4i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -103,7 +103,7 @@ define @sadd_nxv4i8_vi( %va) { ; CHECK-LABEL: sadd_nxv4i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 5, i32 0 @@ -117,7 +117,7 @@ define @sadd_nxv8i8_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv8i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv8i8( %va, %b) @@ -127,7 +127,7 @@ define @sadd_nxv8i8_vx( %va, i8 %b) { ; CHECK-LABEL: sadd_nxv8i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -139,7 +139,7 @@ define @sadd_nxv8i8_vi( %va) { ; CHECK-LABEL: sadd_nxv8i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 5, i32 0 @@ -153,7 +153,7 @@ define @sadd_nxv16i8_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv16i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv16i8( %va, %b) @@ -163,7 +163,7 @@ define @sadd_nxv16i8_vx( %va, i8 %b) { ; CHECK-LABEL: sadd_nxv16i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -175,7 +175,7 @@ define @sadd_nxv16i8_vi( %va) { ; CHECK-LABEL: sadd_nxv16i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 5, i32 0 @@ -189,7 +189,7 @@ define @sadd_nxv32i8_vv( %va, %b) { 
; CHECK-LABEL: sadd_nxv32i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v12 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv32i8( %va, %b) @@ -199,7 +199,7 @@ define @sadd_nxv32i8_vx( %va, i8 %b) { ; CHECK-LABEL: sadd_nxv32i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -211,7 +211,7 @@ define @sadd_nxv32i8_vi( %va) { ; CHECK-LABEL: sadd_nxv32i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 5, i32 0 @@ -225,7 +225,7 @@ define @sadd_nxv64i8_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv64i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v16 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv64i8( %va, %b) @@ -235,7 +235,7 @@ define @sadd_nxv64i8_vx( %va, i8 %b) { ; CHECK-LABEL: sadd_nxv64i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -247,7 +247,7 @@ define @sadd_nxv64i8_vi( %va) { ; CHECK-LABEL: sadd_nxv64i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 5, i32 0 @@ -261,7 +261,7 @@ define @sadd_nxv1i16_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv1i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv1i16( %va, %b) @@ -271,7 +271,7 @@ define @sadd_nxv1i16_vx( %va, i16 %b) { ; CHECK-LABEL: sadd_nxv1i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -283,7 +283,7 @@ define @sadd_nxv1i16_vi( %va) { ; CHECK-LABEL: sadd_nxv1i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 5, i32 0 @@ -297,7 +297,7 @@ define @sadd_nxv2i16_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv2i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv2i16( %va, %b) @@ -307,7 +307,7 @@ define @sadd_nxv2i16_vx( %va, i16 %b) { ; CHECK-LABEL: sadd_nxv2i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -319,7 +319,7 @@ define @sadd_nxv2i16_vi( %va) { ; CHECK-LABEL: sadd_nxv2i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 5, 
i32 0 @@ -333,7 +333,7 @@ define @sadd_nxv4i16_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv4i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv4i16( %va, %b) @@ -343,7 +343,7 @@ define @sadd_nxv4i16_vx( %va, i16 %b) { ; CHECK-LABEL: sadd_nxv4i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -355,7 +355,7 @@ define @sadd_nxv4i16_vi( %va) { ; CHECK-LABEL: sadd_nxv4i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 5, i32 0 @@ -369,7 +369,7 @@ define @sadd_nxv8i16_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv8i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv8i16( %va, %b) @@ -379,7 +379,7 @@ define @sadd_nxv8i16_vx( %va, i16 %b) { ; CHECK-LABEL: sadd_nxv8i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -391,7 +391,7 @@ define @sadd_nxv8i16_vi( %va) { ; CHECK-LABEL: sadd_nxv8i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 5, i32 0 @@ -405,7 +405,7 @@ define @sadd_nxv16i16_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv16i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v12 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv16i16( %va, %b) @@ -415,7 +415,7 @@ define @sadd_nxv16i16_vx( %va, i16 %b) { ; CHECK-LABEL: sadd_nxv16i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -427,7 +427,7 @@ define @sadd_nxv16i16_vi( %va) { ; CHECK-LABEL: sadd_nxv16i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 5, i32 0 @@ -441,7 +441,7 @@ define @sadd_nxv32i16_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv32i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v16 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv32i16( %va, %b) @@ -451,7 +451,7 @@ define @sadd_nxv32i16_vx( %va, i16 %b) { ; CHECK-LABEL: sadd_nxv32i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -463,7 +463,7 @@ define @sadd_nxv32i16_vi( %va) { ; CHECK-LABEL: sadd_nxv32i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; 
CHECK-NEXT: vsadd.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 5, i32 0 @@ -477,7 +477,7 @@ define @sadd_nxv1i32_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv1i32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv1i32( %va, %b) @@ -487,7 +487,7 @@ define @sadd_nxv1i32_vx( %va, i32 %b) { ; CHECK-LABEL: sadd_nxv1i32_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -499,7 +499,7 @@ define @sadd_nxv1i32_vi( %va) { ; CHECK-LABEL: sadd_nxv1i32_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 5, i32 0 @@ -513,7 +513,7 @@ define @sadd_nxv2i32_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv2i32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv2i32( %va, %b) @@ -523,7 +523,7 @@ define @sadd_nxv2i32_vx( %va, i32 %b) { ; CHECK-LABEL: sadd_nxv2i32_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -535,7 +535,7 @@ define @sadd_nxv2i32_vi( %va) { ; CHECK-LABEL: sadd_nxv2i32_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 5, i32 0 @@ -549,7 +549,7 @@ define @sadd_nxv4i32_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv4i32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv4i32( %va, %b) @@ -559,7 +559,7 @@ define @sadd_nxv4i32_vx( %va, i32 %b) { ; CHECK-LABEL: sadd_nxv4i32_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -571,7 +571,7 @@ define @sadd_nxv4i32_vi( %va) { ; CHECK-LABEL: sadd_nxv4i32_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 5, i32 0 @@ -585,7 +585,7 @@ define @sadd_nxv8i32_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv8i32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v12 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv8i32( %va, %b) @@ -595,7 +595,7 @@ define @sadd_nxv8i32_vx( %va, i32 %b) { ; CHECK-LABEL: sadd_nxv8i32_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -607,7 +607,7 @@ define @sadd_nxv8i32_vi( %va) { ; CHECK-LABEL: sadd_nxv8i32_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, 
zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 5, i32 0 @@ -621,7 +621,7 @@ define @sadd_nxv16i32_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv16i32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v16 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv16i32( %va, %b) @@ -631,7 +631,7 @@ define @sadd_nxv16i32_vx( %va, i32 %b) { ; CHECK-LABEL: sadd_nxv16i32_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vsadd.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -643,7 +643,7 @@ define @sadd_nxv16i32_vi( %va) { ; CHECK-LABEL: sadd_nxv16i32_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 5, i32 0 @@ -657,7 +657,7 @@ define @sadd_nxv1i64_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv1i64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv1i64( %va, %b) @@ -672,7 +672,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsadd.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -680,7 +680,7 @@ ; ; RV64-LABEL: sadd_nxv1i64_vx: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV64-NEXT: vsadd.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -692,7 +692,7 @@ define @sadd_nxv1i64_vi( %va) { ; CHECK-LABEL: sadd_nxv1i64_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 5, i32 0 @@ -706,7 +706,7 @@ define @sadd_nxv2i64_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv2i64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv2i64( %va, %b) @@ -721,7 +721,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsadd.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -729,7 +729,7 @@ ; ; RV64-LABEL: sadd_nxv2i64_vx: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV64-NEXT: vsadd.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -741,7 +741,7 @@ define @sadd_nxv2i64_vi( %va) { ; CHECK-LABEL: sadd_nxv2i64_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 5, i32 0 @@ -755,7 +755,7 @@ define @sadd_nxv4i64_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv4i64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu 
+; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v12 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv4i64( %va, %b) @@ -770,7 +770,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsadd.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -778,7 +778,7 @@ ; ; RV64-LABEL: sadd_nxv4i64_vx: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV64-NEXT: vsadd.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -790,7 +790,7 @@ define @sadd_nxv4i64_vi( %va) { ; CHECK-LABEL: sadd_nxv4i64_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 5, i32 0 @@ -804,7 +804,7 @@ define @sadd_nxv8i64_vv( %va, %b) { ; CHECK-LABEL: sadd_nxv8i64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vsadd.vv v8, v8, v16 ; CHECK-NEXT: ret %v = call @llvm.sadd.sat.nxv8i64( %va, %b) @@ -819,7 +819,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsadd.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -827,7 +827,7 @@ ; ; RV64-LABEL: sadd_nxv8i64_vx: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsadd.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -839,7 +839,7 @@ define @sadd_nxv8i64_vi( %va) { ; CHECK-LABEL: sadd_nxv8i64_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vsadd.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 5, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vsaddu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vsaddu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vsaddu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vsaddu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vsaddu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vsaddu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vsaddu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vsaddu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vsaddu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vsaddu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vsaddu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vsaddu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vsaddu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, 
e32, mf2, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vsaddu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vsaddu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vsaddu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vsaddu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vsaddu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vsaddu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vsaddu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vsaddu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vsaddu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1095,7 +1095,7 @@ define @intrinsic_vsaddu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define 
@intrinsic_vsaddu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vsaddu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vsaddu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define @intrinsic_vsaddu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vsaddu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vsaddu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vsaddu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vsaddu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define @intrinsic_vsaddu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vsaddu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vsaddu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vsaddu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vsaddu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ define @intrinsic_vsaddu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ define @intrinsic_vsaddu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vsaddu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1898,7 +1898,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1957,7 +1957,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vsaddu.vv v8, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -2016,7 +2016,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vsaddu.vv v8, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -2075,7 +2075,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vsaddu.vv v8, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 @@ -2124,7 +2124,7 @@ define @intrinsic_vsaddu_vi_nxv1i8_nxv1i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2157,7 +2157,7 @@ define @intrinsic_vsaddu_vi_nxv2i8_nxv2i8_i8( %0, i32 %1) nounwind { ; 
CHECK-LABEL: intrinsic_vsaddu_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2190,7 +2190,7 @@ define @intrinsic_vsaddu_vi_nxv4i8_nxv4i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2223,7 +2223,7 @@ define @intrinsic_vsaddu_vi_nxv8i8_nxv8i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2256,7 +2256,7 @@ define @intrinsic_vsaddu_vi_nxv16i8_nxv16i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2289,7 +2289,7 @@ define @intrinsic_vsaddu_vi_nxv32i8_nxv32i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2322,7 +2322,7 @@ define @intrinsic_vsaddu_vi_nxv64i8_nxv64i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2355,7 +2355,7 @@ define @intrinsic_vsaddu_vi_nxv1i16_nxv1i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2388,7 +2388,7 @@ define @intrinsic_vsaddu_vi_nxv2i16_nxv2i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2421,7 +2421,7 @@ define @intrinsic_vsaddu_vi_nxv4i16_nxv4i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2454,7 +2454,7 @@ define @intrinsic_vsaddu_vi_nxv8i16_nxv8i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2487,7 +2487,7 @@ define @intrinsic_vsaddu_vi_nxv16i16_nxv16i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret 
entry: @@ -2520,7 +2520,7 @@ define @intrinsic_vsaddu_vi_nxv32i16_nxv32i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2553,7 +2553,7 @@ define @intrinsic_vsaddu_vi_nxv1i32_nxv1i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2586,7 +2586,7 @@ define @intrinsic_vsaddu_vi_nxv2i32_nxv2i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2619,7 +2619,7 @@ define @intrinsic_vsaddu_vi_nxv4i32_nxv4i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2652,7 +2652,7 @@ define @intrinsic_vsaddu_vi_nxv8i32_nxv8i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2685,7 +2685,7 @@ define @intrinsic_vsaddu_vi_nxv16i32_nxv16i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2718,7 +2718,7 @@ define @intrinsic_vsaddu_vi_nxv1i64_nxv1i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2751,7 +2751,7 @@ define @intrinsic_vsaddu_vi_nxv2i64_nxv2i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2784,7 +2784,7 @@ define @intrinsic_vsaddu_vi_nxv4i64_nxv4i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2817,7 +2817,7 @@ define @intrinsic_vsaddu_vi_nxv8i64_nxv8i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vsaddu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vsaddu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vsaddu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vsaddu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vsaddu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vsaddu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vsaddu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vsaddu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vsaddu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vsaddu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) 
nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vsaddu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vsaddu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vsaddu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vsaddu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vsaddu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vsaddu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vsaddu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vsaddu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vsaddu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vsaddu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i64_nxv4i64_nxv4i64: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vsaddu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vsaddu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1095,7 +1095,7 @@ define @intrinsic_vsaddu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vsaddu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vsaddu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vsaddu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define @intrinsic_vsaddu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vsaddu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vsaddu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vsaddu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; 
CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vsaddu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define @intrinsic_vsaddu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vsaddu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vsaddu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vsaddu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vsaddu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ define @intrinsic_vsaddu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ define @intrinsic_vsaddu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vsaddu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1894,7 +1894,7 @@ define @intrinsic_vsaddu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1941,7 +1941,7 @@ define @intrinsic_vsaddu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 
%2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1988,7 +1988,7 @@ define @intrinsic_vsaddu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2035,7 +2035,7 @@ define @intrinsic_vsaddu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2076,7 +2076,7 @@ define @intrinsic_vsaddu_vi_nxv1i8_nxv1i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2109,7 +2109,7 @@ define @intrinsic_vsaddu_vi_nxv2i8_nxv2i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2142,7 +2142,7 @@ define @intrinsic_vsaddu_vi_nxv4i8_nxv4i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2175,7 +2175,7 @@ define @intrinsic_vsaddu_vi_nxv8i8_nxv8i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2208,7 +2208,7 @@ define @intrinsic_vsaddu_vi_nxv16i8_nxv16i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2241,7 +2241,7 @@ define @intrinsic_vsaddu_vi_nxv32i8_nxv32i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2274,7 +2274,7 @@ define @intrinsic_vsaddu_vi_nxv64i8_nxv64i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2307,7 +2307,7 @@ define @intrinsic_vsaddu_vi_nxv1i16_nxv1i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; 
CHECK-NEXT: ret entry: @@ -2340,7 +2340,7 @@ define @intrinsic_vsaddu_vi_nxv2i16_nxv2i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2373,7 +2373,7 @@ define @intrinsic_vsaddu_vi_nxv4i16_nxv4i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2406,7 +2406,7 @@ define @intrinsic_vsaddu_vi_nxv8i16_nxv8i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2439,7 +2439,7 @@ define @intrinsic_vsaddu_vi_nxv16i16_nxv16i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2472,7 +2472,7 @@ define @intrinsic_vsaddu_vi_nxv32i16_nxv32i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2505,7 +2505,7 @@ define @intrinsic_vsaddu_vi_nxv1i32_nxv1i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2538,7 +2538,7 @@ define @intrinsic_vsaddu_vi_nxv2i32_nxv2i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2571,7 +2571,7 @@ define @intrinsic_vsaddu_vi_nxv4i32_nxv4i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2604,7 +2604,7 @@ define @intrinsic_vsaddu_vi_nxv8i32_nxv8i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2637,7 +2637,7 @@ define @intrinsic_vsaddu_vi_nxv16i32_nxv16i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2670,7 +2670,7 @@ define @intrinsic_vsaddu_vi_nxv1i64_nxv1i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2703,7 +2703,7 @@ define @intrinsic_vsaddu_vi_nxv2i64_nxv2i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2736,7 +2736,7 @@ define @intrinsic_vsaddu_vi_nxv4i64_nxv4i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2769,7 +2769,7 @@ define @intrinsic_vsaddu_vi_nxv8i64_nxv8i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-sdnode.ll @@ -9,7 +9,7 @@ define @uadd_nxv1i8_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv1i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv1i8( %va, %b) @@ -19,7 +19,7 @@ define @uadd_nxv1i8_vx( %va, i8 %b) { ; CHECK-LABEL: uadd_nxv1i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -31,7 +31,7 @@ define @uadd_nxv1i8_vi( %va) { ; CHECK-LABEL: uadd_nxv1i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 8, i32 0 @@ -45,7 +45,7 @@ define @uadd_nxv2i8_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv2i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv2i8( %va, %b) @@ -55,7 +55,7 @@ define @uadd_nxv2i8_vx( %va, i8 %b) { ; CHECK-LABEL: uadd_nxv2i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -67,7 +67,7 @@ define @uadd_nxv2i8_vi( %va) { ; CHECK-LABEL: uadd_nxv2i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 8, i32 0 @@ -81,7 +81,7 @@ define @uadd_nxv4i8_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv4i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv4i8( %va, %b) @@ -91,7 +91,7 @@ define @uadd_nxv4i8_vx( %va, i8 %b) { ; CHECK-LABEL: uadd_nxv4i8_vx: ; CHECK: # %bb.0: 
-; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -103,7 +103,7 @@ define @uadd_nxv4i8_vi( %va) { ; CHECK-LABEL: uadd_nxv4i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 8, i32 0 @@ -117,7 +117,7 @@ define @uadd_nxv8i8_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv8i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv8i8( %va, %b) @@ -127,7 +127,7 @@ define @uadd_nxv8i8_vx( %va, i8 %b) { ; CHECK-LABEL: uadd_nxv8i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -139,7 +139,7 @@ define @uadd_nxv8i8_vi( %va) { ; CHECK-LABEL: uadd_nxv8i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 8, i32 0 @@ -153,7 +153,7 @@ define @uadd_nxv16i8_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv16i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv16i8( %va, %b) @@ -163,7 +163,7 @@ define @uadd_nxv16i8_vx( %va, i8 %b) { ; CHECK-LABEL: uadd_nxv16i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -175,7 +175,7 @@ define @uadd_nxv16i8_vi( %va) { ; CHECK-LABEL: uadd_nxv16i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 8, i32 0 @@ -189,7 +189,7 @@ define @uadd_nxv32i8_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv32i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v12 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv32i8( %va, %b) @@ -199,7 +199,7 @@ define @uadd_nxv32i8_vx( %va, i8 %b) { ; CHECK-LABEL: uadd_nxv32i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -211,7 +211,7 @@ define @uadd_nxv32i8_vi( %va) { ; CHECK-LABEL: uadd_nxv32i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 8, i32 0 @@ -225,7 +225,7 @@ define @uadd_nxv64i8_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv64i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v16 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv64i8( %va, %b) @@ -235,7 +235,7 @@ define @uadd_nxv64i8_vx( %va, i8 %b) { ; 
CHECK-LABEL: uadd_nxv64i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -247,7 +247,7 @@ define @uadd_nxv64i8_vi( %va) { ; CHECK-LABEL: uadd_nxv64i8_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 8, i32 0 @@ -261,7 +261,7 @@ define @uadd_nxv1i16_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv1i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv1i16( %va, %b) @@ -271,7 +271,7 @@ define @uadd_nxv1i16_vx( %va, i16 %b) { ; CHECK-LABEL: uadd_nxv1i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -283,7 +283,7 @@ define @uadd_nxv1i16_vi( %va) { ; CHECK-LABEL: uadd_nxv1i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 8, i32 0 @@ -297,7 +297,7 @@ define @uadd_nxv2i16_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv2i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv2i16( %va, %b) @@ -307,7 +307,7 @@ define @uadd_nxv2i16_vx( %va, i16 %b) { ; CHECK-LABEL: uadd_nxv2i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -319,7 +319,7 @@ define @uadd_nxv2i16_vi( %va) { ; CHECK-LABEL: uadd_nxv2i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 8, i32 0 @@ -333,7 +333,7 @@ define @uadd_nxv4i16_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv4i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv4i16( %va, %b) @@ -343,7 +343,7 @@ define @uadd_nxv4i16_vx( %va, i16 %b) { ; CHECK-LABEL: uadd_nxv4i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -355,7 +355,7 @@ define @uadd_nxv4i16_vi( %va) { ; CHECK-LABEL: uadd_nxv4i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 8, i32 0 @@ -369,7 +369,7 @@ define @uadd_nxv8i16_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv8i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call 
@llvm.uadd.sat.nxv8i16( %va, %b) @@ -379,7 +379,7 @@ define @uadd_nxv8i16_vx( %va, i16 %b) { ; CHECK-LABEL: uadd_nxv8i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -391,7 +391,7 @@ define @uadd_nxv8i16_vi( %va) { ; CHECK-LABEL: uadd_nxv8i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 8, i32 0 @@ -405,7 +405,7 @@ define @uadd_nxv16i16_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv16i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v12 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv16i16( %va, %b) @@ -415,7 +415,7 @@ define @uadd_nxv16i16_vx( %va, i16 %b) { ; CHECK-LABEL: uadd_nxv16i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -427,7 +427,7 @@ define @uadd_nxv16i16_vi( %va) { ; CHECK-LABEL: uadd_nxv16i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 8, i32 0 @@ -441,7 +441,7 @@ define @uadd_nxv32i16_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv32i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v16 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv32i16( %va, %b) @@ -451,7 +451,7 @@ define @uadd_nxv32i16_vx( %va, i16 %b) { ; CHECK-LABEL: uadd_nxv32i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -463,7 +463,7 @@ define @uadd_nxv32i16_vi( %va) { ; CHECK-LABEL: uadd_nxv32i16_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 8, i32 0 @@ -477,7 +477,7 @@ define @uadd_nxv1i32_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv1i32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv1i32( %va, %b) @@ -487,7 +487,7 @@ define @uadd_nxv1i32_vx( %va, i32 %b) { ; CHECK-LABEL: uadd_nxv1i32_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -499,7 +499,7 @@ define @uadd_nxv1i32_vi( %va) { ; CHECK-LABEL: uadd_nxv1i32_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 8, i32 0 @@ -513,7 +513,7 @@ define @uadd_nxv2i32_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv2i32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; 
CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv2i32( %va, %b) @@ -523,7 +523,7 @@ define @uadd_nxv2i32_vx( %va, i32 %b) { ; CHECK-LABEL: uadd_nxv2i32_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -535,7 +535,7 @@ define @uadd_nxv2i32_vi( %va) { ; CHECK-LABEL: uadd_nxv2i32_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 8, i32 0 @@ -549,7 +549,7 @@ define @uadd_nxv4i32_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv4i32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv4i32( %va, %b) @@ -559,7 +559,7 @@ define @uadd_nxv4i32_vx( %va, i32 %b) { ; CHECK-LABEL: uadd_nxv4i32_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -571,7 +571,7 @@ define @uadd_nxv4i32_vi( %va) { ; CHECK-LABEL: uadd_nxv4i32_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 8, i32 0 @@ -585,7 +585,7 @@ define @uadd_nxv8i32_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv8i32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v12 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv8i32( %va, %b) @@ -595,7 +595,7 @@ define @uadd_nxv8i32_vx( %va, i32 %b) { ; CHECK-LABEL: uadd_nxv8i32_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -607,7 +607,7 @@ define @uadd_nxv8i32_vi( %va) { ; CHECK-LABEL: uadd_nxv8i32_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 8, i32 0 @@ -621,7 +621,7 @@ define @uadd_nxv16i32_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv16i32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v16 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv16i32( %va, %b) @@ -631,7 +631,7 @@ define @uadd_nxv16i32_vx( %va, i32 %b) { ; CHECK-LABEL: uadd_nxv16i32_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vsaddu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -643,7 +643,7 @@ define @uadd_nxv16i32_vi( %va) { ; CHECK-LABEL: uadd_nxv16i32_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 8, i32 0 @@ -657,7 +657,7 @@ define @uadd_nxv1i64_vv( %va, %b) { ; CHECK-LABEL: 
uadd_nxv1i64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv1i64( %va, %b) @@ -672,7 +672,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsaddu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -680,7 +680,7 @@ ; ; RV64-LABEL: uadd_nxv1i64_vx: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV64-NEXT: vsaddu.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -692,7 +692,7 @@ define @uadd_nxv1i64_vi( %va) { ; CHECK-LABEL: uadd_nxv1i64_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 8, i32 0 @@ -706,7 +706,7 @@ define @uadd_nxv2i64_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv2i64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv2i64( %va, %b) @@ -721,7 +721,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsaddu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -729,7 +729,7 @@ ; ; RV64-LABEL: uadd_nxv2i64_vx: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV64-NEXT: vsaddu.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -741,7 +741,7 @@ define @uadd_nxv2i64_vi( %va) { ; CHECK-LABEL: uadd_nxv2i64_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 8, i32 0 @@ -755,7 +755,7 @@ define @uadd_nxv4i64_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv4i64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v12 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv4i64( %va, %b) @@ -770,7 +770,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsaddu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -778,7 +778,7 @@ ; ; RV64-LABEL: uadd_nxv4i64_vx: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV64-NEXT: vsaddu.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -790,7 +790,7 @@ define @uadd_nxv4i64_vi( %va) { ; CHECK-LABEL: uadd_nxv4i64_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 8, i32 0 @@ -804,7 +804,7 @@ define @uadd_nxv8i64_vv( %va, %b) { ; CHECK-LABEL: uadd_nxv8i64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, 
zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vsaddu.vv v8, v8, v16 ; CHECK-NEXT: ret %v = call @llvm.uadd.sat.nxv8i64( %va, %b) @@ -819,7 +819,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsaddu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -827,7 +827,7 @@ ; ; RV64-LABEL: uadd_nxv8i64_vx: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsaddu.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -839,7 +839,7 @@ define @uadd_nxv8i64_vi( %va) { ; CHECK-LABEL: uadd_nxv8i64_vi: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vsaddu.vi v8, v8, 8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 8, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsbc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsbc-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsbc-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsbc-rv32.ll @@ -11,7 +11,7 @@ define @intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -35,7 +35,7 @@ define @intrinsic_vsbc_vvm_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -59,7 +59,7 @@ define @intrinsic_vsbc_vvm_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -83,7 +83,7 @@ define @intrinsic_vsbc_vvm_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -107,7 +107,7 @@ define @intrinsic_vsbc_vvm_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v10, v0 ; CHECK-NEXT: ret entry: @@ -131,7 +131,7 @@ define @intrinsic_vsbc_vvm_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v12, v0 ; CHECK-NEXT: ret entry: @@ -155,7 +155,7 @@ define @intrinsic_vsbc_vvm_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, 
a0, e8, m8, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v16, v0 ; CHECK-NEXT: ret entry: @@ -179,7 +179,7 @@ define @intrinsic_vsbc_vvm_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -203,7 +203,7 @@ define @intrinsic_vsbc_vvm_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -227,7 +227,7 @@ define @intrinsic_vsbc_vvm_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -251,7 +251,7 @@ define @intrinsic_vsbc_vvm_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v10, v0 ; CHECK-NEXT: ret entry: @@ -275,7 +275,7 @@ define @intrinsic_vsbc_vvm_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v12, v0 ; CHECK-NEXT: ret entry: @@ -299,7 +299,7 @@ define @intrinsic_vsbc_vvm_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v16, v0 ; CHECK-NEXT: ret entry: @@ -323,7 +323,7 @@ define @intrinsic_vsbc_vvm_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -347,7 +347,7 @@ define @intrinsic_vsbc_vvm_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -371,7 +371,7 @@ define @intrinsic_vsbc_vvm_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v10, v0 ; CHECK-NEXT: ret entry: @@ -395,7 +395,7 @@ define @intrinsic_vsbc_vvm_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; 
CHECK-NEXT: vsbc.vvm v8, v8, v12, v0 ; CHECK-NEXT: ret entry: @@ -419,7 +419,7 @@ define @intrinsic_vsbc_vvm_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v16, v0 ; CHECK-NEXT: ret entry: @@ -443,7 +443,7 @@ define @intrinsic_vsbc_vvm_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -467,7 +467,7 @@ define @intrinsic_vsbc_vvm_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v10, v0 ; CHECK-NEXT: ret entry: @@ -491,7 +491,7 @@ define @intrinsic_vsbc_vvm_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v12, v0 ; CHECK-NEXT: ret entry: @@ -515,7 +515,7 @@ define @intrinsic_vsbc_vvm_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v16, v0 ; CHECK-NEXT: ret entry: @@ -539,7 +539,7 @@ define @intrinsic_vsbc_vxm_nxv1i8_nxv1i8_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -563,7 +563,7 @@ define @intrinsic_vsbc_vxm_nxv2i8_nxv2i8_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -587,7 +587,7 @@ define @intrinsic_vsbc_vxm_nxv4i8_nxv4i8_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -611,7 +611,7 @@ define @intrinsic_vsbc_vxm_nxv8i8_nxv8i8_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -635,7 +635,7 @@ define @intrinsic_vsbc_vxm_nxv16i8_nxv16i8_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -659,7 +659,7 @@ define 
@intrinsic_vsbc_vxm_nxv32i8_nxv32i8_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -683,7 +683,7 @@ define @intrinsic_vsbc_vxm_nxv64i8_nxv64i8_i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -707,7 +707,7 @@ define @intrinsic_vsbc_vxm_nxv1i16_nxv1i16_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -731,7 +731,7 @@ define @intrinsic_vsbc_vxm_nxv2i16_nxv2i16_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -755,7 +755,7 @@ define @intrinsic_vsbc_vxm_nxv4i16_nxv4i16_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -779,7 +779,7 @@ define @intrinsic_vsbc_vxm_nxv8i16_nxv8i16_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -803,7 +803,7 @@ define @intrinsic_vsbc_vxm_nxv16i16_nxv16i16_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -827,7 +827,7 @@ define @intrinsic_vsbc_vxm_nxv32i16_nxv32i16_i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -851,7 +851,7 @@ define @intrinsic_vsbc_vxm_nxv1i32_nxv1i32_i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -875,7 +875,7 @@ define @intrinsic_vsbc_vxm_nxv2i32_nxv2i32_i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -899,7 +899,7 @@ define @intrinsic_vsbc_vxm_nxv4i32_nxv4i32_i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsbc_vxm_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -923,7 +923,7 @@ define @intrinsic_vsbc_vxm_nxv8i32_nxv8i32_i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -947,7 +947,7 @@ define @intrinsic_vsbc_vxm_nxv16i32_nxv16i32_i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -975,7 +975,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0 ; CHECK-NEXT: addi sp, sp, 16 @@ -1005,7 +1005,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vsbc.vvm v8, v8, v10, v0 ; CHECK-NEXT: addi sp, sp, 16 @@ -1035,7 +1035,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vsbc.vvm v8, v8, v12, v0 ; CHECK-NEXT: addi sp, sp, 16 @@ -1065,7 +1065,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vsbc.vvm v8, v8, v16, v0 ; CHECK-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsbc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsbc-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsbc-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsbc-rv64.ll @@ -11,7 +11,7 @@ define @intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -35,7 +35,7 @@ define @intrinsic_vsbc_vvm_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -59,7 +59,7 @@ define @intrinsic_vsbc_vvm_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -83,7 +83,7 @@ define @intrinsic_vsbc_vvm_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -107,7 +107,7 @@ define @intrinsic_vsbc_vvm_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v10, v0 ; CHECK-NEXT: ret entry: @@ -131,7 +131,7 @@ define @intrinsic_vsbc_vvm_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v12, v0 ; CHECK-NEXT: ret entry: @@ -155,7 +155,7 @@ define @intrinsic_vsbc_vvm_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v16, v0 ; CHECK-NEXT: ret entry: @@ -179,7 +179,7 @@ define @intrinsic_vsbc_vvm_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -203,7 +203,7 @@ define @intrinsic_vsbc_vvm_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -227,7 +227,7 @@ define @intrinsic_vsbc_vvm_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -251,7 +251,7 @@ define @intrinsic_vsbc_vvm_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v10, v0 ; CHECK-NEXT: ret entry: @@ -275,7 +275,7 @@ define @intrinsic_vsbc_vvm_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v12, v0 ; CHECK-NEXT: ret entry: @@ -299,7 +299,7 @@ define @intrinsic_vsbc_vvm_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v16, v0 ; CHECK-NEXT: ret entry: @@ -323,7 +323,7 @@ define @intrinsic_vsbc_vvm_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -347,7 +347,7 @@ define @intrinsic_vsbc_vvm_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -371,7 +371,7 @@ define @intrinsic_vsbc_vvm_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v10, v0 ; CHECK-NEXT: ret entry: @@ -395,7 +395,7 @@ define @intrinsic_vsbc_vvm_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v12, v0 ; CHECK-NEXT: ret entry: @@ -419,7 +419,7 @@ define @intrinsic_vsbc_vvm_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v16, v0 ; CHECK-NEXT: ret entry: @@ -443,7 +443,7 @@ define @intrinsic_vsbc_vvm_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -467,7 +467,7 @@ define @intrinsic_vsbc_vvm_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v10, v0 ; CHECK-NEXT: ret entry: @@ -491,7 +491,7 @@ define @intrinsic_vsbc_vvm_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v12, v0 ; CHECK-NEXT: ret entry: @@ -515,7 +515,7 @@ define @intrinsic_vsbc_vvm_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsbc.vvm v8, v8, v16, v0 ; CHECK-NEXT: ret entry: @@ -539,7 +539,7 @@ define @intrinsic_vsbc_vxm_nxv1i8_nxv1i8_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -563,7 +563,7 @@ define @intrinsic_vsbc_vxm_nxv2i8_nxv2i8_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, 
mf4, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -587,7 +587,7 @@ define @intrinsic_vsbc_vxm_nxv4i8_nxv4i8_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -611,7 +611,7 @@ define @intrinsic_vsbc_vxm_nxv8i8_nxv8i8_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -635,7 +635,7 @@ define @intrinsic_vsbc_vxm_nxv16i8_nxv16i8_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -659,7 +659,7 @@ define @intrinsic_vsbc_vxm_nxv32i8_nxv32i8_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -683,7 +683,7 @@ define @intrinsic_vsbc_vxm_nxv64i8_nxv64i8_i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -707,7 +707,7 @@ define @intrinsic_vsbc_vxm_nxv1i16_nxv1i16_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -731,7 +731,7 @@ define @intrinsic_vsbc_vxm_nxv2i16_nxv2i16_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -755,7 +755,7 @@ define @intrinsic_vsbc_vxm_nxv4i16_nxv4i16_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -779,7 +779,7 @@ define @intrinsic_vsbc_vxm_nxv8i16_nxv8i16_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -803,7 +803,7 @@ define @intrinsic_vsbc_vxm_nxv16i16_nxv16i16_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -827,7 +827,7 @@ define 
@intrinsic_vsbc_vxm_nxv32i16_nxv32i16_i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -851,7 +851,7 @@ define @intrinsic_vsbc_vxm_nxv1i32_nxv1i32_i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -875,7 +875,7 @@ define @intrinsic_vsbc_vxm_nxv2i32_nxv2i32_i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -899,7 +899,7 @@ define @intrinsic_vsbc_vxm_nxv4i32_nxv4i32_i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -923,7 +923,7 @@ define @intrinsic_vsbc_vxm_nxv8i32_nxv8i32_i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -947,7 +947,7 @@ define @intrinsic_vsbc_vxm_nxv16i32_nxv16i32_i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -971,7 +971,7 @@ define @intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64( %0, i64 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -995,7 +995,7 @@ define @intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64( %0, i64 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -1019,7 +1019,7 @@ define @intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64( %0, i64 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: @@ -1043,7 +1043,7 @@ define @intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64( %0, i64 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsbc.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vse.ll b/llvm/test/CodeGen/RISCV/rvv/vse.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/vse.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vse.ll @@ -11,7 +11,7 @@ define void @intrinsic_vse_v_nxv1i64_nxv1i64( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vse64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -32,7 +32,7 @@ define void @intrinsic_vse_mask_v_nxv1i64_nxv1i64( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vse64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -53,7 +53,7 @@ define void @intrinsic_vse_v_nxv2i64_nxv2i64( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vse64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -74,7 +74,7 @@ define void @intrinsic_vse_mask_v_nxv2i64_nxv2i64( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vse64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -95,7 +95,7 @@ define void @intrinsic_vse_v_nxv4i64_nxv4i64( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vse64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -116,7 +116,7 @@ define void @intrinsic_vse_mask_v_nxv4i64_nxv4i64( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vse64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -137,7 +137,7 @@ define void @intrinsic_vse_v_nxv8i64_nxv8i64( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vse64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -158,7 +158,7 @@ define void @intrinsic_vse_mask_v_nxv8i64_nxv8i64( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vse64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -179,7 +179,7 @@ define void @intrinsic_vse_v_nxv1f64_nxv1f64( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vse64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -200,7 +200,7 @@ define void @intrinsic_vse_mask_v_nxv1f64_nxv1f64( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vse64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -221,7 +221,7 @@ define void @intrinsic_vse_v_nxv2f64_nxv2f64( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vse_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vse64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -242,7 +242,7 @@ define void @intrinsic_vse_mask_v_nxv2f64_nxv2f64( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vse64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -263,7 +263,7 @@ define void @intrinsic_vse_v_nxv4f64_nxv4f64( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vse64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -284,7 +284,7 @@ define void @intrinsic_vse_mask_v_nxv4f64_nxv4f64( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vse64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -305,7 +305,7 @@ define void @intrinsic_vse_v_nxv8f64_nxv8f64( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vse64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -326,7 +326,7 @@ define void @intrinsic_vse_mask_v_nxv8f64_nxv8f64( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vse64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -347,7 +347,7 @@ define void @intrinsic_vse_v_nxv1i32_nxv1i32( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -368,7 +368,7 @@ define void @intrinsic_vse_mask_v_nxv1i32_nxv1i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -389,7 +389,7 @@ define void @intrinsic_vse_v_nxv2i32_nxv2i32( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -410,7 +410,7 @@ define void @intrinsic_vse_mask_v_nxv2i32_nxv2i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -431,7 +431,7 @@ define void @intrinsic_vse_v_nxv4i32_nxv4i32( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; 
CHECK-NEXT: ret entry: @@ -452,7 +452,7 @@ define void @intrinsic_vse_mask_v_nxv4i32_nxv4i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -473,7 +473,7 @@ define void @intrinsic_vse_v_nxv8i32_nxv8i32( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -494,7 +494,7 @@ define void @intrinsic_vse_mask_v_nxv8i32_nxv8i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -515,7 +515,7 @@ define void @intrinsic_vse_v_nxv16i32_nxv16i32( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -536,7 +536,7 @@ define void @intrinsic_vse_mask_v_nxv16i32_nxv16i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -557,7 +557,7 @@ define void @intrinsic_vse_v_nxv1f32_nxv1f32( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -578,7 +578,7 @@ define void @intrinsic_vse_mask_v_nxv1f32_nxv1f32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -599,7 +599,7 @@ define void @intrinsic_vse_v_nxv2f32_nxv2f32( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -620,7 +620,7 @@ define void @intrinsic_vse_mask_v_nxv2f32_nxv2f32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -641,7 +641,7 @@ define void @intrinsic_vse_v_nxv4f32_nxv4f32( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -662,7 +662,7 @@ define void @intrinsic_vse_mask_v_nxv4f32_nxv4f32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f32_nxv4f32: 
; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -683,7 +683,7 @@ define void @intrinsic_vse_v_nxv8f32_nxv8f32( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -704,7 +704,7 @@ define void @intrinsic_vse_mask_v_nxv8f32_nxv8f32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -725,7 +725,7 @@ define void @intrinsic_vse_v_nxv16f32_nxv16f32( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -746,7 +746,7 @@ define void @intrinsic_vse_mask_v_nxv16f32_nxv16f32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vse32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -767,7 +767,7 @@ define void @intrinsic_vse_v_nxv1i16_nxv1i16( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -788,7 +788,7 @@ define void @intrinsic_vse_mask_v_nxv1i16_nxv1i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -809,7 +809,7 @@ define void @intrinsic_vse_v_nxv2i16_nxv2i16( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -830,7 +830,7 @@ define void @intrinsic_vse_mask_v_nxv2i16_nxv2i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -851,7 +851,7 @@ define void @intrinsic_vse_v_nxv4i16_nxv4i16( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -872,7 +872,7 @@ define void @intrinsic_vse_mask_v_nxv4i16_nxv4i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: 
ret entry: @@ -893,7 +893,7 @@ define void @intrinsic_vse_v_nxv8i16_nxv8i16( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -914,7 +914,7 @@ define void @intrinsic_vse_mask_v_nxv8i16_nxv8i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -935,7 +935,7 @@ define void @intrinsic_vse_v_nxv16i16_nxv16i16( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -956,7 +956,7 @@ define void @intrinsic_vse_mask_v_nxv16i16_nxv16i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -977,7 +977,7 @@ define void @intrinsic_vse_v_nxv32i16_nxv32i16( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -998,7 +998,7 @@ define void @intrinsic_vse_mask_v_nxv32i16_nxv32i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1019,7 +1019,7 @@ define void @intrinsic_vse_v_nxv1f16_nxv1f16( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1040,7 +1040,7 @@ define void @intrinsic_vse_mask_v_nxv1f16_nxv1f16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1061,7 +1061,7 @@ define void @intrinsic_vse_v_nxv2f16_nxv2f16( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1082,7 +1082,7 @@ define void @intrinsic_vse_mask_v_nxv2f16_nxv2f16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1103,7 +1103,7 @@ define void @intrinsic_vse_v_nxv4f16_nxv4f16( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv4f16_nxv4f16: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1124,7 +1124,7 @@ define void @intrinsic_vse_mask_v_nxv4f16_nxv4f16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1145,7 +1145,7 @@ define void @intrinsic_vse_v_nxv8f16_nxv8f16( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1166,7 +1166,7 @@ define void @intrinsic_vse_mask_v_nxv8f16_nxv8f16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1187,7 +1187,7 @@ define void @intrinsic_vse_v_nxv16f16_nxv16f16( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1208,7 +1208,7 @@ define void @intrinsic_vse_mask_v_nxv16f16_nxv16f16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1229,7 +1229,7 @@ define void @intrinsic_vse_v_nxv32f16_nxv32f16( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1250,7 +1250,7 @@ define void @intrinsic_vse_mask_v_nxv32f16_nxv32f16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vse16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1271,7 +1271,7 @@ define void @intrinsic_vse_v_nxv1i8_nxv1i8( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1292,7 +1292,7 @@ define void @intrinsic_vse_mask_v_nxv1i8_nxv1i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vse8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1313,7 +1313,7 @@ define void @intrinsic_vse_v_nxv2i8_nxv2i8( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret 
entry: @@ -1334,7 +1334,7 @@ define void @intrinsic_vse_mask_v_nxv2i8_nxv2i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vse8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1355,7 +1355,7 @@ define void @intrinsic_vse_v_nxv4i8_nxv4i8( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1376,7 +1376,7 @@ define void @intrinsic_vse_mask_v_nxv4i8_nxv4i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vse8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1397,7 +1397,7 @@ define void @intrinsic_vse_v_nxv8i8_nxv8i8( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1418,7 +1418,7 @@ define void @intrinsic_vse_mask_v_nxv8i8_nxv8i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vse8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1439,7 +1439,7 @@ define void @intrinsic_vse_v_nxv16i8_nxv16i8( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1460,7 +1460,7 @@ define void @intrinsic_vse_mask_v_nxv16i8_nxv16i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vse8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1481,7 +1481,7 @@ define void @intrinsic_vse_v_nxv32i8_nxv32i8( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1502,7 +1502,7 @@ define void @intrinsic_vse_mask_v_nxv32i8_nxv32i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vse8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1523,7 +1523,7 @@ define void @intrinsic_vse_v_nxv64i8_nxv64i8( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vse_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1544,7 +1544,7 @@ define void @intrinsic_vse_mask_v_nxv64i8_nxv64i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_mask_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vse8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll @@ -5,7 +5,7 @@ define @vfmerge_vv_nxv1f16( %va, %vb, %cond) { ; CHECK-LABEL: vfmerge_vv_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -15,7 +15,7 @@ define @vfmerge_fv_nxv1f16( %va, half %b, %cond) { ; CHECK-LABEL: vfmerge_fv_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -27,7 +27,7 @@ define @vfmerge_vv_nxv2f16( %va, %vb, %cond) { ; CHECK-LABEL: vfmerge_vv_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -37,7 +37,7 @@ define @vfmerge_fv_nxv2f16( %va, half %b, %cond) { ; CHECK-LABEL: vfmerge_fv_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -49,7 +49,7 @@ define @vfmerge_vv_nxv4f16( %va, %vb, %cond) { ; CHECK-LABEL: vfmerge_vv_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -59,7 +59,7 @@ define @vfmerge_fv_nxv4f16( %va, half %b, %cond) { ; CHECK-LABEL: vfmerge_fv_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -71,7 +71,7 @@ define @vfmerge_vv_nxv8f16( %va, %vb, %cond) { ; CHECK-LABEL: vfmerge_vv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -81,7 +81,7 @@ define @vfmerge_fv_nxv8f16( %va, half %b, %cond) { ; CHECK-LABEL: vfmerge_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -93,7 +93,7 @@ define @vfmerge_zv_nxv8f16( %va, %cond) { ; CHECK-LABEL: vfmerge_zv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 ; CHECK-NEXT: ret %head = insertelement poison, half zeroinitializer, i32 0 @@ -124,7 +124,7 @@ define @vfmerge_vv_nxv16f16( %va, %vb, %cond) { ; CHECK-LABEL: vfmerge_vv_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -134,7 +134,7 @@ define @vfmerge_fv_nxv16f16( 
%va, half %b, %cond) { ; CHECK-LABEL: vfmerge_fv_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -146,7 +146,7 @@ define @vfmerge_vv_nxv32f16( %va, %vb, %cond) { ; CHECK-LABEL: vfmerge_vv_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -156,7 +156,7 @@ define @vfmerge_fv_nxv32f16( %va, half %b, %cond) { ; CHECK-LABEL: vfmerge_fv_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -168,7 +168,7 @@ define @vfmerge_vv_nxv1f32( %va, %vb, %cond) { ; CHECK-LABEL: vfmerge_vv_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -178,7 +178,7 @@ define @vfmerge_fv_nxv1f32( %va, float %b, %cond) { ; CHECK-LABEL: vfmerge_fv_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -190,7 +190,7 @@ define @vfmerge_vv_nxv2f32( %va, %vb, %cond) { ; CHECK-LABEL: vfmerge_vv_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -200,7 +200,7 @@ define @vfmerge_fv_nxv2f32( %va, float %b, %cond) { ; CHECK-LABEL: vfmerge_fv_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -212,7 +212,7 @@ define @vfmerge_vv_nxv4f32( %va, %vb, %cond) { ; CHECK-LABEL: vfmerge_vv_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -222,7 +222,7 @@ define @vfmerge_fv_nxv4f32( %va, float %b, %cond) { ; CHECK-LABEL: vfmerge_fv_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -234,7 +234,7 @@ define @vfmerge_vv_nxv8f32( %va, %vb, %cond) { ; CHECK-LABEL: vfmerge_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -244,7 +244,7 @@ define @vfmerge_fv_nxv8f32( %va, float %b, %cond) { ; CHECK-LABEL: vfmerge_fv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -256,7 +256,7 @@ define @vfmerge_zv_nxv8f32( %va, %cond) { ; CHECK-LABEL: 
vfmerge_zv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 ; CHECK-NEXT: ret %head = insertelement poison, float zeroinitializer, i32 0 @@ -268,7 +268,7 @@ define @vfmerge_vv_nxv16f32( %va, %vb, %cond) { ; CHECK-LABEL: vfmerge_vv_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -278,7 +278,7 @@ define @vfmerge_fv_nxv16f32( %va, float %b, %cond) { ; CHECK-LABEL: vfmerge_fv_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -290,7 +290,7 @@ define @vfmerge_vv_nxv1f64( %va, %vb, %cond) { ; CHECK-LABEL: vfmerge_vv_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -300,7 +300,7 @@ define @vfmerge_fv_nxv1f64( %va, double %b, %cond) { ; CHECK-LABEL: vfmerge_fv_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -312,7 +312,7 @@ define @vfmerge_vv_nxv2f64( %va, %vb, %cond) { ; CHECK-LABEL: vfmerge_vv_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -322,7 +322,7 @@ define @vfmerge_fv_nxv2f64( %va, double %b, %cond) { ; CHECK-LABEL: vfmerge_fv_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -334,7 +334,7 @@ define @vfmerge_vv_nxv4f64( %va, %vb, %cond) { ; CHECK-LABEL: vfmerge_vv_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -344,7 +344,7 @@ define @vfmerge_fv_nxv4f64( %va, double %b, %cond) { ; CHECK-LABEL: vfmerge_fv_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -356,7 +356,7 @@ define @vfmerge_vv_nxv8f64( %va, %vb, %cond) { ; CHECK-LABEL: vfmerge_vv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -366,7 +366,7 @@ define @vfmerge_fv_nxv8f64( %va, double %b, %cond) { ; CHECK-LABEL: vfmerge_fv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -378,7 +378,7 @@ define @vfmerge_zv_nxv8f64( %va, %cond) { ; CHECK-LABEL: vfmerge_zv_nxv8f64: ; CHECK: # %bb.0: 
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 ; CHECK-NEXT: ret %head = insertelement poison, double zeroinitializer, i32 0 @@ -407,7 +407,7 @@ ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: vl8re64.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmseq.vi v24, v16, 0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload @@ -434,14 +434,14 @@ define void @vselect_legalize_regression( %a, %ma, %mb, * %out) { ; CHECK-LABEL: vselect_legalize_regression: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, m2, ta, ma ; CHECK-NEXT: vlm.v v24, (a0) ; CHECK-NEXT: vmand.mm v1, v0, v24 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a2, a0, 3 -; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v1, a2 -; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; CHECK-NEXT: vmv.v.i v24, 0 ; CHECK-NEXT: vmerge.vvm v16, v24, v16, v0 ; CHECK-NEXT: vmv1r.v v0, v1 diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll @@ -5,7 +5,7 @@ define @vfmerge_vv_nxv1f16( %va, %vb, %cond) { ; CHECK-LABEL: vfmerge_vv_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -15,7 +15,7 @@ define @vfmerge_fv_nxv1f16( %va, half %b, %cond) { ; CHECK-LABEL: vfmerge_fv_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -27,7 +27,7 @@ define @vfmerge_vv_nxv2f16( %va, %vb, %cond) { ; CHECK-LABEL: vfmerge_vv_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -37,7 +37,7 @@ define @vfmerge_fv_nxv2f16( %va, half %b, %cond) { ; CHECK-LABEL: vfmerge_fv_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -49,7 +49,7 @@ define @vfmerge_vv_nxv4f16( %va, %vb, %cond) { ; CHECK-LABEL: vfmerge_vv_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -59,7 +59,7 @@ define @vfmerge_fv_nxv4f16( %va, half %b, %cond) { ; CHECK-LABEL: vfmerge_fv_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -71,7 +71,7 @@ define @vfmerge_vv_nxv8f16( %va, %vb, %cond) { ; CHECK-LABEL: vfmerge_vv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, 
mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -81,7 +81,7 @@ define @vfmerge_fv_nxv8f16( %va, half %b, %cond) { ; CHECK-LABEL: vfmerge_fv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -93,7 +93,7 @@ define @vfmerge_zv_nxv8f16( %va, %cond) { ; CHECK-LABEL: vfmerge_zv_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 ; CHECK-NEXT: ret %head = insertelement poison, half zeroinitializer, i32 0 @@ -124,7 +124,7 @@ define @vfmerge_vv_nxv16f16( %va, %vb, %cond) { ; CHECK-LABEL: vfmerge_vv_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -134,7 +134,7 @@ define @vfmerge_fv_nxv16f16( %va, half %b, %cond) { ; CHECK-LABEL: vfmerge_fv_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -146,7 +146,7 @@ define @vfmerge_vv_nxv32f16( %va, %vb, %cond) { ; CHECK-LABEL: vfmerge_vv_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -156,7 +156,7 @@ define @vfmerge_fv_nxv32f16( %va, half %b, %cond) { ; CHECK-LABEL: vfmerge_fv_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %head = insertelement poison, half %b, i32 0 @@ -168,7 +168,7 @@ define @vfmerge_vv_nxv1f32( %va, %vb, %cond) { ; CHECK-LABEL: vfmerge_vv_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -178,7 +178,7 @@ define @vfmerge_fv_nxv1f32( %va, float %b, %cond) { ; CHECK-LABEL: vfmerge_fv_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -190,7 +190,7 @@ define @vfmerge_vv_nxv2f32( %va, %vb, %cond) { ; CHECK-LABEL: vfmerge_vv_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -200,7 +200,7 @@ define @vfmerge_fv_nxv2f32( %va, float %b, %cond) { ; CHECK-LABEL: vfmerge_fv_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -212,7 +212,7 @@ define @vfmerge_vv_nxv4f32( %va, %vb, %cond) { ; CHECK-LABEL: vfmerge_vv_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma 
; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -222,7 +222,7 @@ define @vfmerge_fv_nxv4f32( %va, float %b, %cond) { ; CHECK-LABEL: vfmerge_fv_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -234,7 +234,7 @@ define @vfmerge_vv_nxv8f32( %va, %vb, %cond) { ; CHECK-LABEL: vfmerge_vv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -244,7 +244,7 @@ define @vfmerge_fv_nxv8f32( %va, float %b, %cond) { ; CHECK-LABEL: vfmerge_fv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -256,7 +256,7 @@ define @vfmerge_zv_nxv8f32( %va, %cond) { ; CHECK-LABEL: vfmerge_zv_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 ; CHECK-NEXT: ret %head = insertelement poison, float zeroinitializer, i32 0 @@ -268,7 +268,7 @@ define @vfmerge_vv_nxv16f32( %va, %vb, %cond) { ; CHECK-LABEL: vfmerge_vv_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -278,7 +278,7 @@ define @vfmerge_fv_nxv16f32( %va, float %b, %cond) { ; CHECK-LABEL: vfmerge_fv_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %head = insertelement poison, float %b, i32 0 @@ -290,7 +290,7 @@ define @vfmerge_vv_nxv1f64( %va, %vb, %cond) { ; CHECK-LABEL: vfmerge_vv_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -300,7 +300,7 @@ define @vfmerge_fv_nxv1f64( %va, double %b, %cond) { ; CHECK-LABEL: vfmerge_fv_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -312,7 +312,7 @@ define @vfmerge_vv_nxv2f64( %va, %vb, %cond) { ; CHECK-LABEL: vfmerge_vv_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -322,7 +322,7 @@ define @vfmerge_fv_nxv2f64( %va, double %b, %cond) { ; CHECK-LABEL: vfmerge_fv_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -334,7 +334,7 @@ define @vfmerge_vv_nxv4f64( %va, %vb, %cond) { ; CHECK-LABEL: vfmerge_vv_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; 
CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -344,7 +344,7 @@ define @vfmerge_fv_nxv4f64( %va, double %b, %cond) { ; CHECK-LABEL: vfmerge_fv_nxv4f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -356,7 +356,7 @@ define @vfmerge_vv_nxv8f64( %va, %vb, %cond) { ; CHECK-LABEL: vfmerge_vv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -366,7 +366,7 @@ define @vfmerge_fv_nxv8f64( %va, double %b, %cond) { ; CHECK-LABEL: vfmerge_fv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 ; CHECK-NEXT: ret %head = insertelement poison, double %b, i32 0 @@ -378,7 +378,7 @@ define @vfmerge_zv_nxv8f64( %va, %cond) { ; CHECK-LABEL: vfmerge_zv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 0, v0 ; CHECK-NEXT: ret %head = insertelement poison, double zeroinitializer, i32 0 @@ -407,7 +407,7 @@ ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: vl8re64.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmseq.vi v24, v16, 0 ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload @@ -434,14 +434,14 @@ define void @vselect_legalize_regression( %a, %ma, %mb, * %out) { ; CHECK-LABEL: vselect_legalize_regression: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, m2, ta, ma ; CHECK-NEXT: vlm.v v24, (a0) ; CHECK-NEXT: vmand.mm v1, v0, v24 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: srli a2, a0, 3 -; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v1, a2 -; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; CHECK-NEXT: vmv.v.i v24, 0 ; CHECK-NEXT: vmerge.vvm v16, v24, v16, v0 ; CHECK-NEXT: vmv1r.v v0, v1 diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv32.ll @@ -4,7 +4,7 @@ define @vmerge_vv_nxv1i8( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -14,7 +14,7 @@ define @vmerge_xv_nxv1i8( %va, i8 signext %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -26,7 +26,7 @@ define @vmerge_iv_nxv1i8( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i8 3, 
i32 0 @@ -38,7 +38,7 @@ define @vmerge_vv_nxv2i8( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -48,7 +48,7 @@ define @vmerge_xv_nxv2i8( %va, i8 signext %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -60,7 +60,7 @@ define @vmerge_iv_nxv2i8( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i8 3, i32 0 @@ -72,7 +72,7 @@ define @vmerge_vv_nxv3i8( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv3i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -82,7 +82,7 @@ define @vmerge_xv_nxv3i8( %va, i8 signext %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv3i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -94,7 +94,7 @@ define @vmerge_iv_nxv3i8( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv3i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i8 3, i32 0 @@ -106,7 +106,7 @@ define @vmerge_vv_nxv4i8( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -116,7 +116,7 @@ define @vmerge_xv_nxv4i8( %va, i8 signext %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -128,7 +128,7 @@ define @vmerge_iv_nxv4i8( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i8 3, i32 0 @@ -140,7 +140,7 @@ define @vmerge_vv_nxv8i8( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -150,7 +150,7 @@ define @vmerge_xv_nxv8i8( %va, i8 signext %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -162,7 +162,7 @@ define @vmerge_iv_nxv8i8( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli 
a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i8 3, i32 0 @@ -174,7 +174,7 @@ define @vmerge_vv_nxv16i8( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -184,7 +184,7 @@ define @vmerge_xv_nxv16i8( %va, i8 signext %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -196,7 +196,7 @@ define @vmerge_iv_nxv16i8( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i8 3, i32 0 @@ -208,7 +208,7 @@ define @vmerge_vv_nxv32i8( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -218,7 +218,7 @@ define @vmerge_xv_nxv32i8( %va, i8 signext %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -230,7 +230,7 @@ define @vmerge_iv_nxv32i8( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i8 3, i32 0 @@ -242,7 +242,7 @@ define @vmerge_vv_nxv64i8( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -252,7 +252,7 @@ define @vmerge_xv_nxv64i8( %va, i8 signext %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -264,7 +264,7 @@ define @vmerge_iv_nxv64i8( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i8 3, i32 0 @@ -276,7 +276,7 @@ define @vmerge_vv_nxv1i16( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -286,7 +286,7 @@ define @vmerge_xv_nxv1i16( %va, i16 signext %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; 
CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -298,7 +298,7 @@ define @vmerge_iv_nxv1i16( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i16 3, i32 0 @@ -310,7 +310,7 @@ define @vmerge_vv_nxv2i16( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -320,7 +320,7 @@ define @vmerge_xv_nxv2i16( %va, i16 signext %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -332,7 +332,7 @@ define @vmerge_iv_nxv2i16( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i16 3, i32 0 @@ -344,7 +344,7 @@ define @vmerge_vv_nxv4i16( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -354,7 +354,7 @@ define @vmerge_xv_nxv4i16( %va, i16 signext %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -366,7 +366,7 @@ define @vmerge_iv_nxv4i16( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i16 3, i32 0 @@ -378,7 +378,7 @@ define @vmerge_vv_nxv8i16( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -388,7 +388,7 @@ define @vmerge_xv_nxv8i16( %va, i16 signext %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -400,7 +400,7 @@ define @vmerge_iv_nxv8i16( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i16 3, i32 0 @@ -412,7 +412,7 @@ define @vmerge_vv_nxv16i16( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -422,7 +422,7 @@ define 
@vmerge_xv_nxv16i16( %va, i16 signext %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -434,7 +434,7 @@ define @vmerge_iv_nxv16i16( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i16 3, i32 0 @@ -446,7 +446,7 @@ define @vmerge_vv_nxv32i16( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -456,7 +456,7 @@ define @vmerge_xv_nxv32i16( %va, i16 signext %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -468,7 +468,7 @@ define @vmerge_iv_nxv32i16( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i16 3, i32 0 @@ -480,7 +480,7 @@ define @vmerge_vv_nxv1i32( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -490,7 +490,7 @@ define @vmerge_xv_nxv1i32( %va, i32 %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -502,7 +502,7 @@ define @vmerge_iv_nxv1i32( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i32 3, i32 0 @@ -514,7 +514,7 @@ define @vmerge_vv_nxv2i32( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -524,7 +524,7 @@ define @vmerge_xv_nxv2i32( %va, i32 %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -536,7 +536,7 @@ define @vmerge_iv_nxv2i32( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i32 3, i32 0 @@ -548,7 +548,7 @@ define @vmerge_vv_nxv4i32( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv4i32: ; CHECK: # 
%bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -558,7 +558,7 @@ define @vmerge_xv_nxv4i32( %va, i32 %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -570,7 +570,7 @@ define @vmerge_iv_nxv4i32( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i32 3, i32 0 @@ -582,7 +582,7 @@ define @vmerge_vv_nxv8i32( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -592,7 +592,7 @@ define @vmerge_xv_nxv8i32( %va, i32 %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -604,7 +604,7 @@ define @vmerge_iv_nxv8i32( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i32 3, i32 0 @@ -616,7 +616,7 @@ define @vmerge_vv_nxv16i32( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -626,7 +626,7 @@ define @vmerge_xv_nxv16i32( %va, i32 %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -638,7 +638,7 @@ define @vmerge_iv_nxv16i32( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i32 3, i32 0 @@ -650,7 +650,7 @@ define @vmerge_vv_nxv1i64( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -678,7 +678,7 @@ define @vmerge_iv_nxv1i64( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i64 3, i32 0 @@ -690,7 +690,7 @@ define @vmerge_vv_nxv2i64( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: 
vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -718,7 +718,7 @@ define @vmerge_iv_nxv2i64( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i64 3, i32 0 @@ -730,7 +730,7 @@ define @vmerge_vv_nxv4i64( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -758,7 +758,7 @@ define @vmerge_iv_nxv4i64( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i64 3, i32 0 @@ -770,7 +770,7 @@ define @vmerge_vv_nxv8i64( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -798,7 +798,7 @@ define @vmerge_iv_nxv8i64( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i64 3, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv64.ll @@ -4,7 +4,7 @@ define @vmerge_vv_nxv1i8( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -14,7 +14,7 @@ define @vmerge_xv_nxv1i8( %va, i8 signext %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -26,7 +26,7 @@ define @vmerge_iv_nxv1i8( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i8 3, i32 0 @@ -38,7 +38,7 @@ define @vmerge_vv_nxv2i8( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -48,7 +48,7 @@ define @vmerge_xv_nxv2i8( %va, i8 signext %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -60,7 +60,7 @@ define @vmerge_iv_nxv2i8( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: 
vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i8 3, i32 0 @@ -72,7 +72,7 @@ define @vmerge_vv_nxv3i8( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv3i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -82,7 +82,7 @@ define @vmerge_xv_nxv3i8( %va, i8 signext %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv3i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -94,7 +94,7 @@ define @vmerge_iv_nxv3i8( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv3i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i8 3, i32 0 @@ -106,7 +106,7 @@ define @vmerge_vv_nxv4i8( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -116,7 +116,7 @@ define @vmerge_xv_nxv4i8( %va, i8 signext %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -128,7 +128,7 @@ define @vmerge_iv_nxv4i8( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i8 3, i32 0 @@ -140,7 +140,7 @@ define @vmerge_vv_nxv8i8( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -150,7 +150,7 @@ define @vmerge_xv_nxv8i8( %va, i8 signext %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -162,7 +162,7 @@ define @vmerge_iv_nxv8i8( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i8 3, i32 0 @@ -174,7 +174,7 @@ define @vmerge_vv_nxv16i8( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -184,7 +184,7 @@ define @vmerge_xv_nxv16i8( %va, i8 signext %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ 
-196,7 +196,7 @@ define @vmerge_iv_nxv16i8( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i8 3, i32 0 @@ -208,7 +208,7 @@ define @vmerge_vv_nxv32i8( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -218,7 +218,7 @@ define @vmerge_xv_nxv32i8( %va, i8 signext %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -230,7 +230,7 @@ define @vmerge_iv_nxv32i8( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i8 3, i32 0 @@ -242,7 +242,7 @@ define @vmerge_vv_nxv64i8( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -252,7 +252,7 @@ define @vmerge_xv_nxv64i8( %va, i8 signext %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -264,7 +264,7 @@ define @vmerge_iv_nxv64i8( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i8 3, i32 0 @@ -276,7 +276,7 @@ define @vmerge_vv_nxv1i16( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -286,7 +286,7 @@ define @vmerge_xv_nxv1i16( %va, i16 signext %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -298,7 +298,7 @@ define @vmerge_iv_nxv1i16( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i16 3, i32 0 @@ -310,7 +310,7 @@ define @vmerge_vv_nxv2i16( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -320,7 +320,7 @@ define @vmerge_xv_nxv2i16( %va, i16 signext %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv2i16: ; CHECK: # 
%bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -332,7 +332,7 @@ define @vmerge_iv_nxv2i16( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i16 3, i32 0 @@ -344,7 +344,7 @@ define @vmerge_vv_nxv4i16( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -354,7 +354,7 @@ define @vmerge_xv_nxv4i16( %va, i16 signext %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -366,7 +366,7 @@ define @vmerge_iv_nxv4i16( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i16 3, i32 0 @@ -378,7 +378,7 @@ define @vmerge_vv_nxv8i16( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -388,7 +388,7 @@ define @vmerge_xv_nxv8i16( %va, i16 signext %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -400,7 +400,7 @@ define @vmerge_iv_nxv8i16( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i16 3, i32 0 @@ -412,7 +412,7 @@ define @vmerge_vv_nxv16i16( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -422,7 +422,7 @@ define @vmerge_xv_nxv16i16( %va, i16 signext %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -434,7 +434,7 @@ define @vmerge_iv_nxv16i16( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i16 3, i32 0 @@ -446,7 +446,7 @@ define @vmerge_vv_nxv32i16( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, 
zero, e16, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -456,7 +456,7 @@ define @vmerge_xv_nxv32i16( %va, i16 signext %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -468,7 +468,7 @@ define @vmerge_iv_nxv32i16( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i16 3, i32 0 @@ -480,7 +480,7 @@ define @vmerge_vv_nxv1i32( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -490,7 +490,7 @@ define @vmerge_xv_nxv1i32( %va, i32 signext %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -502,7 +502,7 @@ define @vmerge_iv_nxv1i32( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i32 3, i32 0 @@ -514,7 +514,7 @@ define @vmerge_vv_nxv2i32( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -524,7 +524,7 @@ define @vmerge_xv_nxv2i32( %va, i32 signext %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -536,7 +536,7 @@ define @vmerge_iv_nxv2i32( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i32 3, i32 0 @@ -548,7 +548,7 @@ define @vmerge_vv_nxv4i32( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -558,7 +558,7 @@ define @vmerge_xv_nxv4i32( %va, i32 signext %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -570,7 +570,7 @@ define @vmerge_iv_nxv4i32( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = 
insertelement poison, i32 3, i32 0 @@ -582,7 +582,7 @@ define @vmerge_vv_nxv8i32( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -592,7 +592,7 @@ define @vmerge_xv_nxv8i32( %va, i32 signext %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -604,7 +604,7 @@ define @vmerge_iv_nxv8i32( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i32 3, i32 0 @@ -616,7 +616,7 @@ define @vmerge_vv_nxv16i32( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -626,7 +626,7 @@ define @vmerge_xv_nxv16i32( %va, i32 signext %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -638,7 +638,7 @@ define @vmerge_iv_nxv16i32( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i32 3, i32 0 @@ -650,7 +650,7 @@ define @vmerge_vv_nxv1i64( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -660,7 +660,7 @@ define @vmerge_xv_nxv1i64( %va, i64 %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -672,7 +672,7 @@ define @vmerge_iv_nxv1i64( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i64 3, i32 0 @@ -684,7 +684,7 @@ define @vmerge_vv_nxv2i64( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -694,7 +694,7 @@ define @vmerge_xv_nxv2i64( %va, i64 %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -706,7 +706,7 @@ define @vmerge_iv_nxv2i64( %va, %cond) { ; 
CHECK-LABEL: vmerge_iv_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i64 3, i32 0 @@ -718,7 +718,7 @@ define @vmerge_vv_nxv4i64( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -728,7 +728,7 @@ define @vmerge_xv_nxv4i64( %va, i64 %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -740,7 +740,7 @@ define @vmerge_iv_nxv4i64( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i64 3, i32 0 @@ -752,7 +752,7 @@ define @vmerge_vv_nxv8i64( %va, %vb, %cond) { ; CHECK-LABEL: vmerge_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 ; CHECK-NEXT: ret %vc = select %cond, %va, %vb @@ -762,7 +762,7 @@ define @vmerge_xv_nxv8i64( %va, i64 %b, %cond) { ; CHECK-LABEL: vmerge_xv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0 ; CHECK-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -774,7 +774,7 @@ define @vmerge_iv_nxv8i64( %va, %cond) { ; CHECK-LABEL: vmerge_iv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmerge.vim v8, v8, 3, v0 ; CHECK-NEXT: ret %head = insertelement poison, i64 3, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-mask.ll --- a/llvm/test/CodeGen/RISCV/rvv/vselect-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vselect-mask.ll @@ -5,7 +5,7 @@ define @vselect_nxv1i1( %a, %b, %cc) { ; CHECK-LABEL: vselect_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmandn.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 @@ -17,7 +17,7 @@ define @vselect_nxv2i1( %a, %b, %cc) { ; CHECK-LABEL: vselect_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmandn.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 @@ -29,7 +29,7 @@ define @vselect_nxv4i1( %a, %b, %cc) { ; CHECK-LABEL: vselect_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmandn.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 @@ -41,7 +41,7 @@ define @vselect_nxv8i1( %a, %b, %cc) { ; CHECK-LABEL: vselect_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmandn.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 @@ -53,7 
+53,7 @@ define @vselect_nxv16i1( %a, %b, %cc) { ; CHECK-LABEL: vselect_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmandn.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 @@ -65,7 +65,7 @@ define @vselect_nxv32i1( %a, %b, %cc) { ; CHECK-LABEL: vselect_nxv32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vmandn.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 @@ -77,7 +77,7 @@ define @vselect_nxv64i1( %a, %b, %cc) { ; CHECK-LABEL: vselect_nxv64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vmandn.mm v8, v8, v9 ; CHECK-NEXT: vmand.mm v9, v0, v9 ; CHECK-NEXT: vmor.mm v0, v9, v8 diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll @@ -9,7 +9,7 @@ define @select_nxv1i1( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv1i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmandn.mm v9, v9, v0 ; CHECK-NEXT: vmand.mm v8, v8, v0 ; CHECK-NEXT: vmor.mm v0, v8, v9 @@ -23,7 +23,7 @@ define @select_nxv2i1( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv2i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmandn.mm v9, v9, v0 ; CHECK-NEXT: vmand.mm v8, v8, v0 ; CHECK-NEXT: vmor.mm v0, v8, v9 @@ -37,7 +37,7 @@ define @select_nxv4i1( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv4i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmandn.mm v9, v9, v0 ; CHECK-NEXT: vmand.mm v8, v8, v0 ; CHECK-NEXT: vmor.mm v0, v8, v9 @@ -51,7 +51,7 @@ define @select_nxv8i1( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv8i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmandn.mm v9, v9, v0 ; CHECK-NEXT: vmand.mm v8, v8, v0 ; CHECK-NEXT: vmor.mm v0, v8, v9 @@ -65,7 +65,7 @@ define @select_nxv16i1( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv16i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmandn.mm v9, v9, v0 ; CHECK-NEXT: vmand.mm v8, v8, v0 ; CHECK-NEXT: vmor.mm v0, v8, v9 @@ -79,7 +79,7 @@ define @select_nxv32i1( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv32i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmandn.mm v9, v9, v0 ; CHECK-NEXT: vmand.mm v8, v8, v0 ; CHECK-NEXT: vmor.mm v0, v8, v9 @@ -93,7 +93,7 @@ define @select_nxv64i1( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv64i1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmandn.mm v9, v9, v0 ; CHECK-NEXT: vmand.mm v8, v8, v0 ; CHECK-NEXT: vmor.mm v0, v8, v9 @@ -107,7 +107,7 @@ define @select_nxv8i7( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv8i7: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, 
m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv8i7( %a, %b, %c, i32 %evl) @@ -119,7 +119,7 @@ define @select_nxv1i8( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv1i8( %a, %b, %c, i32 %evl) @@ -131,7 +131,7 @@ define @select_nxv2i8( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv2i8( %a, %b, %c, i32 %evl) @@ -143,7 +143,7 @@ define @select_nxv4i8( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv4i8( %a, %b, %c, i32 %evl) @@ -155,7 +155,7 @@ define @select_nxv8i8( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv8i8( %a, %b, %c, i32 %evl) @@ -167,7 +167,7 @@ define @select_nxv14i8( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv14i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv14i8( %a, %b, %c, i32 %evl) @@ -179,7 +179,7 @@ define @select_nxv16i8( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv16i8( %a, %b, %c, i32 %evl) @@ -191,7 +191,7 @@ define @select_nxv32i8( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv32i8( %a, %b, %c, i32 %evl) @@ -203,7 +203,7 @@ define @select_nxv64i8( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv64i8( %a, %b, %c, i32 %evl) @@ -215,7 +215,7 @@ define @select_nxv1i16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv1i16( %a, %b, %c, i32 %evl) @@ -227,7 +227,7 @@ define @select_nxv2i16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv2i16( %a, %b, %c, i32 %evl) @@ -239,7 +239,7 @@ define @select_nxv4i16( %a, %b, %c, i32 
zeroext %evl) { ; CHECK-LABEL: select_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv4i16( %a, %b, %c, i32 %evl) @@ -251,7 +251,7 @@ define @select_nxv8i16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv8i16( %a, %b, %c, i32 %evl) @@ -263,7 +263,7 @@ define @select_nxv16i16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv16i16( %a, %b, %c, i32 %evl) @@ -275,7 +275,7 @@ define @select_nxv32i16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv32i16( %a, %b, %c, i32 %evl) @@ -287,7 +287,7 @@ define @select_nxv1i32( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv1i32( %a, %b, %c, i32 %evl) @@ -299,7 +299,7 @@ define @select_nxv2i32( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv2i32( %a, %b, %c, i32 %evl) @@ -311,7 +311,7 @@ define @select_nxv4i32( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv4i32( %a, %b, %c, i32 %evl) @@ -323,7 +323,7 @@ define @select_nxv8i32( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv8i32( %a, %b, %c, i32 %evl) @@ -335,7 +335,7 @@ define @select_nxv16i32( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv16i32( %a, %b, %c, i32 %evl) @@ -364,7 +364,7 @@ ; CHECK-NEXT: add a4, a0, a4 ; CHECK-NEXT: vl8re32.v v8, (a4) ; CHECK-NEXT: srli a5, a1, 2 -; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma ; CHECK-NEXT: slli a1, a1, 1 ; CHECK-NEXT: sub a4, a2, a1 ; CHECK-NEXT: vslidedown.vx v0, v0, a5 @@ -375,13 +375,13 @@ ; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; 
CHECK-NEXT: vmerge.vvm v16, v8, v16, v0 ; CHECK-NEXT: bltu a2, a1, .LBB27_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: mv a2, a1 ; CHECK-NEXT: .LBB27_4: -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 @@ -422,7 +422,7 @@ ; CHECK-NEXT: add a2, a0, a2 ; CHECK-NEXT: vl8re32.v v8, (a2) ; CHECK-NEXT: srli a5, a1, 2 -; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma ; CHECK-NEXT: slli a2, a1, 1 ; CHECK-NEXT: sub a4, a1, a2 ; CHECK-NEXT: vslidedown.vx v0, v0, a5 @@ -433,13 +433,13 @@ ; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v16, v8, v16, v0 ; CHECK-NEXT: bltu a1, a2, .LBB28_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: mv a1, a2 ; CHECK-NEXT: .LBB28_4: -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 @@ -465,7 +465,7 @@ define @select_nxv1i64( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv1i64( %a, %b, %c, i32 %evl) @@ -477,7 +477,7 @@ define @select_nxv2i64( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv2i64( %a, %b, %c, i32 %evl) @@ -489,7 +489,7 @@ define @select_nxv4i64( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv4i64( %a, %b, %c, i32 %evl) @@ -501,7 +501,7 @@ define @select_nxv8i64( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv8i64( %a, %b, %c, i32 %evl) @@ -513,7 +513,7 @@ define @select_nxv1f16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv1f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv1f16( %a, %b, %c, i32 %evl) @@ -525,7 +525,7 @@ define @select_nxv2f16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv2f16( %a, %b, %c, i32 %evl) @@ -537,7 +537,7 @@ define @select_nxv4f16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = call 
@llvm.vp.select.nxv4f16( %a, %b, %c, i32 %evl) @@ -549,7 +549,7 @@ define @select_nxv8f16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv8f16( %a, %b, %c, i32 %evl) @@ -561,7 +561,7 @@ define @select_nxv16f16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv16f16( %a, %b, %c, i32 %evl) @@ -573,7 +573,7 @@ define @select_nxv32f16( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv32f16( %a, %b, %c, i32 %evl) @@ -585,7 +585,7 @@ define @select_nxv1f32( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv1f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv1f32( %a, %b, %c, i32 %evl) @@ -597,7 +597,7 @@ define @select_nxv2f32( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv2f32( %a, %b, %c, i32 %evl) @@ -609,7 +609,7 @@ define @select_nxv4f32( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv4f32( %a, %b, %c, i32 %evl) @@ -621,7 +621,7 @@ define @select_nxv8f32( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv8f32( %a, %b, %c, i32 %evl) @@ -633,7 +633,7 @@ define @select_nxv16f32( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv16f32( %a, %b, %c, i32 %evl) @@ -645,7 +645,7 @@ define @select_nxv1f64( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv1f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv1f64( %a, %b, %c, i32 %evl) @@ -657,7 +657,7 @@ define @select_nxv2f64( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv2f64( %a, %b, %c, i32 %evl) @@ -669,7 +669,7 @@ define @select_nxv4f64( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv4f64: ; 
CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv4f64( %a, %b, %c, i32 %evl) @@ -681,7 +681,7 @@ define @select_nxv8f64( %a, %b, %c, i32 zeroext %evl) { ; CHECK-LABEL: select_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0 ; CHECK-NEXT: ret %v = call @llvm.vp.select.nxv8f64( %a, %b, %c, i32 %evl) @@ -710,7 +710,7 @@ ; CHECK-NEXT: add a4, a0, a4 ; CHECK-NEXT: vl8re64.v v8, (a4) ; CHECK-NEXT: srli a5, a1, 3 -; CHECK-NEXT: vsetvli a4, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a4, zero, e8, mf4, ta, ma ; CHECK-NEXT: sub a4, a2, a1 ; CHECK-NEXT: vslidedown.vx v0, v0, a5 ; CHECK-NEXT: bltu a2, a4, .LBB48_2 @@ -720,13 +720,13 @@ ; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill -; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v16, v8, v16, v0 ; CHECK-NEXT: bltu a2, a1, .LBB48_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: mv a2, a1 ; CHECK-NEXT: .LBB48_4: -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vmv1r.v v0, v1 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 3 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll @@ -90,11 +90,12 @@ ; CHECK-NEXT: # %bb.1: # %if.then ; CHECK-NEXT: vsetvli a0, a0, e64, m1, ta, mu ; CHECK-NEXT: vfadd.vv v9, v8, v9 -; CHECK-NEXT: vfmul.vv v8, v9, v8 -; CHECK-NEXT: ret +; CHECK-NEXT: j .LBB2_3 ; CHECK-NEXT: .LBB2_2: # %if.else ; CHECK-NEXT: vsetvli a0, a0, e64, m1, ta, mu ; CHECK-NEXT: vfsub.vv v9, v8, v9 +; CHECK-NEXT: .LBB2_3: # %if.end +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfmul.vv v8, v9, v8 ; CHECK-NEXT: ret entry: @@ -125,7 +126,7 @@ ; CHECK-NEXT: # %bb.1: # %if.then ; CHECK-NEXT: lui a1, %hi(.LCPI3_0) ; CHECK-NEXT: addi a1, a1, %lo(.LCPI3_0) -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v10, (a1), zero ; CHECK-NEXT: lui a1, %hi(.LCPI3_1) ; CHECK-NEXT: addi a1, a1, %lo(.LCPI3_1) @@ -138,7 +139,7 @@ ; CHECK-NEXT: .LBB3_2: # %if.else ; CHECK-NEXT: lui a1, %hi(.LCPI3_2) ; CHECK-NEXT: addi a1, a1, %lo(.LCPI3_2) -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vlse32.v v10, (a1), zero ; CHECK-NEXT: lui a1, %hi(.LCPI3_3) ; CHECK-NEXT: addi a1, a1, %lo(.LCPI3_3) @@ -148,7 +149,7 @@ ; CHECK-NEXT: addi a1, a1, %lo(scratch) ; CHECK-NEXT: vse32.v v10, (a1) ; CHECK-NEXT: .LBB3_3: # %if.end -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -273,7 +274,7 @@ ; CHECK-NEXT: addi a0, a0, %lo(scratch) ; CHECK-NEXT: vse32.v v9, (a0) ; CHECK-NEXT: .LBB5_5: # %if.end10 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vfmul.vv v8, v8, v8 ; CHECK-NEXT: ret entry: @@ -346,7 +347,7 @@ ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill ; CHECK-NEXT: call 
foo@plt -; CHECK-NEXT: vsetvli zero, s0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, s0, e64, m1, ta, ma ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: add a0, a0, sp ; CHECK-NEXT: addi a0, a0, 16 @@ -413,7 +414,7 @@ ; CHECK-NEXT: .LBB7_2: # %if.else ; CHECK-NEXT: vfsub.vv v9, v8, v9 ; CHECK-NEXT: .LBB7_3: # %if.end -; CHECK-NEXT: vsetvli zero, s0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, s0, e64, m1, ta, ma ; CHECK-NEXT: vfmul.vv v8, v9, v8 ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 1 @@ -449,11 +450,12 @@ ; CHECK-NEXT: beqz a3, .LBB8_2 ; CHECK-NEXT: .LBB8_1: # %for.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: vle32.v v16, (a2) ; CHECK-NEXT: slli a4, a3, 2 ; CHECK-NEXT: add a1, a1, a4 -; CHECK-NEXT: vsetvli zero, zero, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m8, tu, ma ; CHECK-NEXT: vfmacc.vf v16, fa0, v8 ; CHECK-NEXT: vse32.v v16, (a2) ; CHECK-NEXT: sub a0, a0, a3 @@ -500,16 +502,16 @@ define @test_vsetvli_x0_x0(* %x, * %y, %z, i64 %vl, i1 %cond) nounwind { ; CHECK-LABEL: test_vsetvli_x0_x0: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: andi a0, a3, 1 ; CHECK-NEXT: beqz a0, .LBB9_2 ; CHECK-NEXT: # %bb.1: # %if ; CHECK-NEXT: vle16.v v10, (a1) -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vwcvt.x.x.v v8, v10 ; CHECK-NEXT: .LBB9_2: # %if.end -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vadd.vv v8, v9, v8 ; CHECK-NEXT: ret entry: @@ -539,23 +541,23 @@ define @test_vsetvli_x0_x0_2(* %x, * %y, * %z, i64 %vl, i1 %cond, i1 %cond2, %w) nounwind { ; CHECK-LABEL: test_vsetvli_x0_x0_2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a3, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: andi a0, a4, 1 ; CHECK-NEXT: beqz a0, .LBB10_2 ; CHECK-NEXT: # %bb.1: # %if ; CHECK-NEXT: vle16.v v10, (a1) -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vwadd.wv v9, v9, v10 ; CHECK-NEXT: .LBB10_2: # %if.end ; CHECK-NEXT: andi a0, a5, 1 ; CHECK-NEXT: beqz a0, .LBB10_4 ; CHECK-NEXT: # %bb.3: # %if2 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v10, (a2) ; CHECK-NEXT: vwadd.wv v9, v9, v10 ; CHECK-NEXT: .LBB10_4: # %if2.end -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma ; CHECK-NEXT: vadd.vv v8, v9, v8 ; CHECK-NEXT: ret entry: @@ -591,20 +593,21 @@ ; CHECK-NEXT: blez a0, .LBB11_3 ; CHECK-NEXT: # %bb.1: # %for.body.preheader ; CHECK-NEXT: li a5, 0 -; CHECK-NEXT: li t0, 0 +; CHECK-NEXT: li t1, 0 ; CHECK-NEXT: slli a7, a6, 3 ; CHECK-NEXT: .LBB11_2: # %for.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: add a4, a2, a5 -; CHECK-NEXT: vle64.v v8, (a4) +; CHECK-NEXT: add t0, a2, a5 +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma +; CHECK-NEXT: vle64.v v8, (t0) ; CHECK-NEXT: add a4, a3, a5 ; CHECK-NEXT: vle64.v v9, (a4) ; CHECK-NEXT: vfadd.vv v8, v8, v9 ; CHECK-NEXT: add a4, a1, a5 ; CHECK-NEXT: vse64.v v8, (a4) -; CHECK-NEXT: add t0, t0, a6 +; CHECK-NEXT: add t1, t1, a6 ; CHECK-NEXT: add a5, a5, a7 -; CHECK-NEXT: blt t0, a0, .LBB11_2 +; 
CHECK-NEXT: blt t1, a0, .LBB11_2 ; CHECK-NEXT: .LBB11_3: # %for.end ; CHECK-NEXT: ret entry: @@ -644,6 +647,7 @@ ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: .LBB12_2: # %for.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; CHECK-NEXT: vse64.v v8, (a1) ; CHECK-NEXT: add a3, a3, a2 ; CHECK-NEXT: add a1, a1, a4 @@ -677,11 +681,11 @@ ; CHECK-NEXT: # %bb.1: # %for.body.preheader ; CHECK-NEXT: li a3, 0 ; CHECK-NEXT: slli a4, a2, 3 -; CHECK-NEXT: vsetvli a5, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a5, zero, e64, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: .LBB13_2: # %for.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vse64.v v8, (a1) ; CHECK-NEXT: add a3, a3, a2 ; CHECK-NEXT: add a1, a1, a4 @@ -713,11 +717,11 @@ ; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: vsetivli a3, 4, e64, m1, ta, mu ; CHECK-NEXT: slli a4, a3, 3 -; CHECK-NEXT: vsetvli a5, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a5, zero, e64, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: .LBB14_1: # %for.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vsetivli zero, 4, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m1, ta, ma ; CHECK-NEXT: vse64.v v8, (a1) ; CHECK-NEXT: add a2, a2, a3 ; CHECK-NEXT: add a1, a1, a4 @@ -747,11 +751,11 @@ ; CHECK-LABEL: vector_init_vsetvli_fv2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 0 -; CHECK-NEXT: vsetvli a3, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e64, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: .LBB15_1: # %for.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vsetivli zero, 4, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m1, ta, ma ; CHECK-NEXT: vse64.v v8, (a1) ; CHECK-NEXT: addi a2, a2, 4 ; CHECK-NEXT: addi a1, a1, 32 @@ -781,11 +785,11 @@ ; CHECK-LABEL: vector_init_vsetvli_fv3: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: li a2, 0 -; CHECK-NEXT: vsetvli a3, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e64, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: .LBB16_1: # %for.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vsetivli zero, 4, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m1, ta, ma ; CHECK-NEXT: vse64.v v8, (a1) ; CHECK-NEXT: addi a2, a2, 4 ; CHECK-NEXT: addi a1, a1, 32 @@ -814,8 +818,9 @@ define @cross_block_mutate( %a, %b, ; CHECK-LABEL: cross_block_mutate: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli a0, 6, e32, m2, tu, mu +; CHECK-NEXT: vsetivli a0, 6, e32, m2, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 +; CHECK-NEXT: vsetvli zero, zero, e32, m2, tu, mu ; CHECK-NEXT: vadd.vv v8, v8, v10, v0.t ; CHECK-NEXT: ret %mask) { @@ -837,13 +842,13 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: andi a1, a0, 1 ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: beqz a1, .LBB18_2 ; CHECK-NEXT: # %bb.1: # %if ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu ; CHECK-NEXT: .LBB18_2: # %if.end -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -866,7 +871,7 @@ ; CHECK-LABEL: compat_store_consistency: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: andi a0, a0, 1 -; CHECK-NEXT: vsetvli a3, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e64, m1, ta, 
ma ; CHECK-NEXT: vfadd.vv v8, v8, v9 ; CHECK-NEXT: vs1r.v v8, (a1) ; CHECK-NEXT: beqz a0, .LBB19_2 @@ -893,16 +898,16 @@ define @test_ratio_only_vmv_s_x(* %x, * %y, i1 %cond) nounwind { ; CHECK-LABEL: test_ratio_only_vmv_s_x: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: andi a0, a2, 1 ; CHECK-NEXT: beqz a0, .LBB20_2 ; CHECK-NEXT: # %bb.1: # %if ; CHECK-NEXT: vle16.v v9, (a1) -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vwcvt.x.x.v v8, v9 ; CHECK-NEXT: .LBB20_2: # %if.end -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, ma ; CHECK-NEXT: vmv.s.x v8, zero ; CHECK-NEXT: ret entry: @@ -923,7 +928,7 @@ define @test_ratio_only_vmv_s_x2(* %x, * %y, i1 %cond) nounwind { ; CHECK-LABEL: test_ratio_only_vmv_s_x2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 2, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf2, ta, ma ; CHECK-NEXT: vle16.v v9, (a1) ; CHECK-NEXT: andi a1, a2, 1 ; CHECK-NEXT: beqz a1, .LBB21_2 @@ -933,7 +938,7 @@ ; CHECK-NEXT: .LBB21_2: ; CHECK-NEXT: vwcvt.x.x.v v8, v9 ; CHECK-NEXT: .LBB21_3: # %if.end -; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, ma ; CHECK-NEXT: vmv.s.x v8, zero ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir @@ -196,7 +196,7 @@ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v8 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x11 ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY2]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: BEQ [[COPY3]], [[COPY4]], %bb.2 @@ -275,7 +275,7 @@ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x12 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x11 ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 87 /* e32, mf2, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 215 /* e32, mf2, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: [[PseudoVLE32_V_MF2_:%[0-9]+]]:vr = PseudoVLE32_V_MF2 [[COPY2]], $noreg, 5 /* e32 */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: BEQ [[COPY3]], [[COPY4]], %bb.2 @@ -284,14 +284,14 @@ ; CHECK-NEXT: bb.1.if.then: ; CHECK-NEXT: successors: %bb.3(0x80000000) ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype, implicit $vl + ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl ; CHECK-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_M1 [[PseudoVLE32_V_MF2_]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: PseudoBR %bb.3 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.2.if.else: ; CHECK-NEXT: successors: 
%bb.3(0x80000000) ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype, implicit $vl + ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl ; CHECK-NEXT: early-clobber %2:vr = PseudoVSEXT_VF2_M1 [[PseudoVLE32_V_MF2_]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.3.if.end: @@ -363,14 +363,14 @@ ; CHECK-NEXT: bb.1.if.then: ; CHECK-NEXT: successors: %bb.3(0x80000000) ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[COPY2]], [[COPY1]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: PseudoBR %bb.3 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.2.if.else: ; CHECK-NEXT: successors: %bb.3(0x80000000) ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: [[PseudoVSUB_VV_M1_:%[0-9]+]]:vr = PseudoVSUB_VV_M1 [[COPY1]], [[COPY1]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.3.if.end: @@ -507,9 +507,9 @@ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x11 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 ; CHECK-NEXT: [[DEF:%[0-9]+]]:gpr = IMPLICIT_DEF - ; CHECK-NEXT: dead %12:gpr = PseudoVSETVLIX0 $x0, 95 /* e64, mf2, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: dead %12:gpr = PseudoVSETVLIX0 $x0, 223 /* e64, mf2, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: [[PseudoVID_V_MF2_:%[0-9]+]]:vr = PseudoVID_V_MF2 -1, 6 /* e64 */, implicit $vl, implicit $vtype - ; CHECK-NEXT: dead %13:gpr = PseudoVSETVLIX0 $x0, 87 /* e32, mf2, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: dead %13:gpr = PseudoVSETVLIX0 $x0, 215 /* e32, mf2, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: [[PseudoVMV_V_I_MF2_:%[0-9]+]]:vrnov0 = PseudoVMV_V_I_MF2 0, -1, 5 /* e32 */, implicit $vl, implicit $vtype ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.1: @@ -519,7 +519,7 @@ ; CHECK-NEXT: $v0 = COPY [[PseudoVMSEQ_VI_MF2_]] ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 23 /* e32, mf2, tu, mu */, implicit-def $vl, implicit-def $vtype, implicit $vl ; CHECK-NEXT: [[PseudoVLE32_V_MF2_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_MF2_MASK [[PseudoVMV_V_I_MF2_]], killed [[COPY]], $v0, -1, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype - ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 69 /* e8, mf8, ta, mu */, implicit-def $vl, implicit-def $vtype, implicit $vl + ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 197 /* e8, mf8, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl ; CHECK-NEXT: [[PseudoVCPOP_M_B1_:%[0-9]+]]:gpr = PseudoVCPOP_M_B1 [[PseudoVMSEQ_VI_MF2_]], -1, 0 /* e8 */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: BEQ killed [[PseudoVCPOP_M_B1_]], [[COPY2]], %bb.3 @@ -532,7 +532,7 @@ ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.3: ; CHECK-NEXT: [[PHI:%[0-9]+]]:gpr = PHI [[DEF]], %bb.1, [[LWU]], %bb.2 - ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 87 /* e32, mf2, ta, 
mu */, implicit-def $vl, implicit-def $vtype, implicit $vl + ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 215 /* e32, mf2, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl ; CHECK-NEXT: [[PseudoVADD_VX_MF2_:%[0-9]+]]:vr = nsw PseudoVADD_VX_MF2 [[PseudoVLE32_V_MF2_MASK]], [[PHI]], -1, 5 /* e32 */, implicit $vl, implicit $vtype ; CHECK-NEXT: $v0 = COPY [[PseudoVADD_VX_MF2_]] ; CHECK-NEXT: PseudoRET implicit $v0 @@ -593,7 +593,7 @@ ; CHECK-NEXT: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11 - ; CHECK-NEXT: dead %11:gpr = PseudoVSETVLIX0 $x0, 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: dead %11:gpr = PseudoVSETVLIX0 $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: [[PseudoVID_V_M1_:%[0-9]+]]:vr = PseudoVID_V_M1 -1, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: {{ $}} @@ -661,7 +661,7 @@ ; CHECK-NEXT: [[PseudoReadVLENB:%[0-9]+]]:gpr = PseudoReadVLENB ; CHECK-NEXT: [[SRLI:%[0-9]+]]:gpr = SRLI [[PseudoReadVLENB]], 3 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11 - ; CHECK-NEXT: dead %11:gpr = PseudoVSETVLIX0 $x0, 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: dead %11:gpr = PseudoVSETVLIX0 $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: [[PseudoVID_V_M1_:%[0-9]+]]:vr = PseudoVID_V_M1 -1, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: {{ $}} @@ -755,7 +755,7 @@ ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 4, 80 /* e32, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 4, 208 /* e32, m1, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 0, 4, 5 /* e32 */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY [[PseudoVMV_V_I_M1_]] ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY [[COPY2]] @@ -782,7 +782,7 @@ ; CHECK-NEXT: [[PseudoVMV_S_X_M1_:%[0-9]+]]:vr = PseudoVMV_S_X_M1 [[DEF]], [[COPY5]], 1, 5 /* e32 */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF ; CHECK-NEXT: [[PseudoVREDSUM_VS_M1_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1 [[DEF1]], [[PseudoVADD_VV_M1_]], killed [[PseudoVMV_S_X_M1_]], 4, 5 /* e32 */, implicit $vl, implicit $vtype - ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 1, 80 /* e32, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 1, 208 /* e32, m1, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: PseudoVSE32_V_M1 killed [[PseudoVREDSUM_VS_M1_]], [[COPY]], 1, 5 /* e32 */, implicit $vl, implicit $vtype :: (store (s32) into %ir.res) ; CHECK-NEXT: PseudoRET bb.0.entry: @@ -837,7 +837,7 @@ ; CHECK-NEXT: %t3:vr = COPY $v2 ; CHECK-NEXT: %t4:vr = COPY $v3 ; CHECK-NEXT: %t5:vrnov0 = COPY $v1 - ; CHECK-NEXT: dead %14:gpr = PseudoVSETVLIX0 $x0, 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: dead %14:gpr = PseudoVSETVLIX0 $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: %t6:vr = PseudoVMSEQ_VI_M1 %t1, 0, -1, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: PseudoBR %bb.1 ; CHECK-NEXT: {{ $}} @@ -861,7 +861,7 @@ ; CHECK-NEXT: 
bb.3: ; CHECK-NEXT: %stval:vr = PHI %t4, %bb.1, %ldval, %bb.2 ; CHECK-NEXT: $v0 = COPY %mask - ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 69 /* e8, mf8, ta, mu */, implicit-def $vl, implicit-def $vtype, implicit $vl + ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 197 /* e8, mf8, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl ; CHECK-NEXT: PseudoVSOXEI64_V_M1_MF8_MASK killed %stval, killed %b, %idxs, $v0, -1, 3 /* e8 */, implicit $vl, implicit $vtype ; CHECK-NEXT: PseudoRET bb.0: @@ -919,7 +919,7 @@ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x15 ; CHECK-NEXT: %vlenb:gpr = PseudoReadVLENB ; CHECK-NEXT: %inc:gpr = SRLI killed %vlenb, 3 - ; CHECK-NEXT: dead %21:gpr = PseudoVSETVLIX0 $x0, 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: dead %21:gpr = PseudoVSETVLIX0 $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: [[PseudoVID_V_M1_:%[0-9]+]]:vr = PseudoVID_V_M1 -1, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x0 ; CHECK-NEXT: PseudoBR %bb.1 @@ -929,7 +929,7 @@ ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: [[PHI:%[0-9]+]]:gpr = PHI [[COPY3]], %bb.0, %11, %bb.3 ; CHECK-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[COPY2]], [[PHI]] - ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype, implicit $vl + ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl ; CHECK-NEXT: [[PseudoVADD_VX_M1_:%[0-9]+]]:vr = PseudoVADD_VX_M1 [[PseudoVID_V_M1_]], killed [[ADD]], -1, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[PseudoVMSLTU_VX_M1_:%[0-9]+]]:vr = PseudoVMSLTU_VX_M1 [[PseudoVADD_VX_M1_]], [[COPY1]], -1, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[PseudoVCPOP_M_B1_:%[0-9]+]]:gpr = PseudoVCPOP_M_B1 [[PseudoVMSLTU_VX_M1_]], -1, 0 /* e8 */, implicit $vl, implicit $vtype @@ -942,7 +942,7 @@ ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: [[ADD1:%[0-9]+]]:gpr = ADD %src, [[PHI]] ; CHECK-NEXT: [[PseudoVLE8_V_MF8_:%[0-9]+]]:vrnov0 = PseudoVLE8_V_MF8 killed [[ADD1]], -1, 3 /* e8 */, implicit $vl, implicit $vtype - ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 69 /* e8, mf8, ta, mu */, implicit-def $vl, implicit-def $vtype, implicit $vl + ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 197 /* e8, mf8, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl ; CHECK-NEXT: [[PseudoVADD_VI_MF8_:%[0-9]+]]:vrnov0 = PseudoVADD_VI_MF8 [[PseudoVLE8_V_MF8_]], 4, -1, 3 /* e8 */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[ADD2:%[0-9]+]]:gpr = ADD %dst, [[PHI]] ; CHECK-NEXT: PseudoVSE8_V_MF8 killed [[PseudoVADD_VI_MF8_]], killed [[ADD2]], -1, 3 /* e8 */, implicit $vl, implicit $vtype diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll @@ -19,7 +19,7 @@ ; CHECK-LABEL: test1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32, mf2, ta, mu -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -35,7 +35,7 @@ define @test2(i64 %avl, %a, %b) nounwind { ; CHECK-LABEL: test2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -109,13 
+109,14 @@ ; CHECK-NEXT: li a4, 0 ; CHECK-NEXT: .LBB5_2: # %for.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: slli a6, a4, 2 -; CHECK-NEXT: add a5, a0, a6 -; CHECK-NEXT: vle32.v v8, (a5) +; CHECK-NEXT: slli a5, a4, 2 +; CHECK-NEXT: add a6, a0, a5 +; CHECK-NEXT: vsetvli zero, a3, e32, m1, ta, ma +; CHECK-NEXT: vle32.v v8, (a6) ; CHECK-NEXT: vmsle.vi v9, v8, -3 ; CHECK-NEXT: vmsgt.vi v10, v8, 2 ; CHECK-NEXT: vmor.mm v0, v9, v10 -; CHECK-NEXT: add a5, a1, a6 +; CHECK-NEXT: add a5, a5, a1 ; CHECK-NEXT: vse32.v v8, (a5), v0.t ; CHECK-NEXT: add a4, a4, a3 ; CHECK-NEXT: vsetvli a3, a2, e32, m1, ta, mu @@ -151,7 +152,7 @@ define @test7( %a, i64 %b, %mask) nounwind { ; CHECK-LABEL: test7: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a1, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -166,7 +167,7 @@ define @test8( %a, i64 %b, %mask) nounwind { ; CHECK-LABEL: test8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 6, e64, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 6, e64, m1, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -180,6 +181,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetivli zero, 9, e64, m1, tu, mu ; CHECK-NEXT: vadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, ma ; CHECK-NEXT: vmv.s.x v8, a0 ; CHECK-NEXT: ret entry: @@ -197,7 +199,7 @@ define @test10( %a, double %b) nounwind { ; CHECK-LABEL: test10: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, tu, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: @@ -210,7 +212,7 @@ define @test11( %a, double %b) nounwind { ; CHECK-LABEL: test11: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 6, e64, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 6, e64, m1, tu, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: @@ -225,6 +227,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetivli zero, 9, e64, m1, tu, mu ; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t +; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 ; CHECK-NEXT: ret entry: @@ -243,7 +246,7 @@ define @test13( %a, %b) nounwind { ; CHECK-LABEL: test13: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -259,9 +262,9 @@ ; CHECK-LABEL: test14: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e32, mf2, ta, mu -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v9 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -282,7 +285,7 @@ define @test15(i64 %avl, %a, %b) nounwind { ; CHECK-LABEL: test15: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v9 ; CHECK-NEXT: vfadd.vv v8, v8, v9 ; CHECK-NEXT: ret @@ -308,9 +311,9 @@ ; CHECK-LABEL: test16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a0, a0, e64, mf2, ta, mu -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vfmv.v.f v9, fa0 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfadd.vv v8, v9, v8 ; CHECK-NEXT: ret entry: @@ -329,9 
+332,9 @@ define double @test17(i64 %avl, %a, %b) nounwind { ; CHECK-LABEL: test17: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a0, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, a0, e64, m1, ta, ma ; CHECK-NEXT: vfmv.f.s ft0, v8 -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v9 ; CHECK-NEXT: vfmv.f.s ft1, v8 ; CHECK-NEXT: fadd.d fa0, ft0, ft1 @@ -353,14 +356,14 @@ define @test18( %a, double %b) nounwind { ; CHECK-LABEL: test18: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 6, e64, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 6, e64, m1, tu, ma ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, ma ; CHECK-NEXT: vfmv.s.f v8, fa0 -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfadd.vv v8, v9, v8 ; CHECK-NEXT: ret entry: @@ -381,10 +384,10 @@ define @test19( %a, double %b) nounwind { ; CHECK-LABEL: test19: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 6, e64, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 6, e64, m1, tu, ma ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vfmv.s.f v9, fa0 -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vfadd.vv v8, v9, v8 ; CHECK-NEXT: ret entry: @@ -413,7 +416,7 @@ ; CHECK-LABEL: avl_forward1b_neg: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetivli a1, 6, e16, m1, ta, mu -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: mv a0, a1 ; CHECK-NEXT: ret @@ -469,7 +472,7 @@ ; CHECK-LABEL: avl_forward4: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16, m1, ta, mu -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -483,7 +486,7 @@ ; CHECK-LABEL: avl_forward4b: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e16, m1, ta, mu -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: mv a0, a1 ; CHECK-NEXT: ret @@ -499,9 +502,9 @@ ; CHECK-LABEL: vleNff: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8, m4, ta, mu -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vle64ff.v v8, (a0) -; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, mu +; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, ma ; CHECK-NEXT: vadd.vx v8, v8, a2 ; CHECK-NEXT: ret entry: @@ -520,7 +523,7 @@ ; CHECK-LABEL: vleNff2: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a1, a1, e8, m4, ta, mu -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vle64ff.v v8, (a0) ; CHECK-NEXT: vadd.vx v8, v8, a2 ; CHECK-NEXT: ret @@ -546,7 +549,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 32 ; CHECK-NEXT: vsetvli a1, a1, e8, m4, ta, mu -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: ret %gvl = tail call i64 @llvm.riscv.vsetvli.i64(i64 32, i64 0, i64 2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir 
b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir +++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir @@ -121,7 +121,7 @@ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x10 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8 - ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[COPY2]], [[COPY1]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]] ; CHECK-NEXT: PseudoRET implicit $v8 @@ -160,7 +160,7 @@ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v8 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY2]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 killed [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]] @@ -198,7 +198,7 @@ ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: [[PseudoVLE32_V_MF2_:%[0-9]+]]:vr = PseudoVLE32_V_MF2 [[COPY1]], $noreg, 5 /* e32 */, implicit $vl, implicit $vtype ; CHECK-NEXT: early-clobber %3:vr = PseudoVZEXT_VF2_M1 killed [[PseudoVLE32_V_MF2_]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: $v8 = COPY %3 @@ -231,7 +231,7 @@ ; CHECK: liveins: $v8 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8 - ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: [[PseudoVMV_X_S_M1_:%[0-9]+]]:gpr = PseudoVMV_X_S_M1 [[COPY]], 6 /* e64 */, implicit $vtype ; CHECK-NEXT: $x10 = COPY [[PseudoVMV_X_S_M1_]] ; CHECK-NEXT: PseudoRET implicit $x10 @@ -266,7 +266,7 @@ ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x11 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY1]], 2, 6 /* e64 */, implicit $vl, implicit $vtype :: (load (s128) from %ir.x) ; CHECK-NEXT: [[PseudoVLE64_V_M1_1:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY]], 2, 6 /* e64 */, implicit $vl, implicit $vtype :: (load (s128) from %ir.y) ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 killed [[PseudoVLE64_V_M1_]], killed [[PseudoVLE64_V_M1_1]], 2, 6 /* e64 */, implicit $vl, implicit $vtype @@ -305,12 +305,12 @@ ; CHECK: 
liveins: $x10 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY]], 2, 6 /* e64 */, implicit $vl, implicit $vtype :: (load (s128) from %ir.x) - ; CHECK-NEXT: dead %6:gpr = PseudoVSETVLIX0 $x0, 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: dead %6:gpr = PseudoVSETVLIX0 $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 0, -1, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF - ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: [[PseudoVREDSUM_VS_M1_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1 [[DEF]], killed [[PseudoVLE64_V_M1_]], killed [[PseudoVMV_V_I_M1_]], 2, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: [[PseudoVMV_X_S_M1_:%[0-9]+]]:gpr = PseudoVMV_X_S_M1 killed [[PseudoVREDSUM_VS_M1_]], 6 /* e64 */, implicit $vtype ; CHECK-NEXT: $x10 = COPY [[PseudoVMV_X_S_M1_]] @@ -392,10 +392,10 @@ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v8 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10 - ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY2]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */ - ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 killed [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]] ; CHECK-NEXT: PseudoRET implicit $v8 @@ -419,9 +419,9 @@ ; CHECK-LABEL: name: vmv_v_i_different_lmuls ; CHECK: liveins: $x10, $v8, $x11 ; CHECK-NEXT: {{ $}} - ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 4, 89 /* e64, m2, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 4, 217 /* e64, m2, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: [[PseudoVID_V_M2_:%[0-9]+]]:vrm2 = PseudoVID_V_M2 4, 6 /* e64 */, implicit $vl, implicit $vtype - ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 70 /* e8, mf4, ta, mu */, implicit-def $vl, implicit-def $vtype, implicit $vl + ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 198 /* e8, mf4, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl ; CHECK-NEXT: [[PseudoVMV_V_I_MF4_:%[0-9]+]]:vr = PseudoVMV_V_I_MF4 0, 4, 3 /* e8 */, implicit $vl, implicit $vtype ; CHECK-NEXT: PseudoRET %0:vrm2 = PseudoVID_V_M2 4, 6 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-regression.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-regression.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-regression.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vsetvli-regression.ll @@ -9,10 +9,10 @@ define i32 @illegal_preserve_vl( %a, %x, * %y) { ; CHECK-LABEL: illegal_preserve_vl: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; CHECK-NEXT: vadd.vv v12, v12, v12 ; CHECK-NEXT: vs4r.v v12, (a0) -; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: ret %index = add %x, %x diff --git a/llvm/test/CodeGen/RISCV/rvv/vsext-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vsext-vp-mask.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsext-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsext-vp-mask.ll @@ -7,7 +7,7 @@ define @vsext_nxv2i1_nxv2i16( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vsext_nxv2i1_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -18,7 +18,7 @@ define @vsext_nxv2i1_nxv2i16_unmasked( %a, i32 zeroext %vl) { ; CHECK-LABEL: vsext_nxv2i1_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -31,7 +31,7 @@ define @vsext_nxv2i1_nxv2i32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vsext_nxv2i1_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -42,7 +42,7 @@ define @vsext_nxv2i1_nxv2i32_unmasked( %a, i32 zeroext %vl) { ; CHECK-LABEL: vsext_nxv2i1_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -55,7 +55,7 @@ define @vsext_nxv2i1_nxv2i64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vsext_nxv2i1_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret @@ -66,7 +66,7 @@ define @vsext_nxv2i1_nxv2i64_unmasked( %a, i32 zeroext %vl) { ; CHECK-LABEL: vsext_nxv2i1_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll @@ -18,7 +18,7 @@ define @vsext_nxv2i8_nxv2i16_unmasked( %a, i32 zeroext %vl) { ; CHECK-LABEL: vsext_nxv2i8_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsext.vf2 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -42,7 +42,7 @@ define @vsext_nxv2i8_nxv2i32_unmasked( %a, i32 zeroext %vl) { ; CHECK-LABEL: vsext_nxv2i8_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsext.vf4 v9, v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -66,7 +66,7 @@ define @vsext_nxv2i8_nxv2i64_unmasked( %a, i32 zeroext %vl) { ; 
CHECK-LABEL: vsext_nxv2i8_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsext.vf8 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -90,7 +90,7 @@ define @vsext_nxv2i16_nxv2i32_unmasked( %a, i32 zeroext %vl) { ; CHECK-LABEL: vsext_nxv2i16_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsext.vf2 v9, v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -114,7 +114,7 @@ define @vsext_nxv2i16_nxv2i64_unmasked( %a, i32 zeroext %vl) { ; CHECK-LABEL: vsext_nxv2i16_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsext.vf4 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -138,7 +138,7 @@ define @vsext_nxv2i32_nxv2i64_unmasked( %a, i32 zeroext %vl) { ; CHECK-LABEL: vsext_nxv2i32_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsext.vf2 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -155,7 +155,7 @@ ; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a4, a1, 2 -; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma ; CHECK-NEXT: slli a1, a1, 1 ; CHECK-NEXT: sub a3, a0, a1 ; CHECK-NEXT: vslidedown.vx v0, v0, a4 @@ -189,14 +189,14 @@ ; CHECK-NEXT: mv a2, a1 ; CHECK-NEXT: .LBB13_2: ; CHECK-NEXT: li a3, 0 -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; CHECK-NEXT: sub a1, a0, a1 ; CHECK-NEXT: vsext.vf4 v24, v8 ; CHECK-NEXT: bltu a0, a1, .LBB13_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: mv a3, a1 ; CHECK-NEXT: .LBB13_4: -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; CHECK-NEXT: vsext.vf4 v16, v10 ; CHECK-NEXT: vmv8r.v v8, v24 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vsext.ll b/llvm/test/CodeGen/RISCV/rvv/vsext.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsext.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsext.ll @@ -11,7 +11,7 @@ define @intrinsic_vsext_vf8_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsext.vf8 v9, v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -55,7 +55,7 @@ define @intrinsic_vsext_vf8_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsext.vf8 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -99,7 +99,7 @@ define @intrinsic_vsext_vf8_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf8_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsext.vf8 v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -143,7 +143,7 @@ define @intrinsic_vsext_vf8_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf8_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsext.vf8 v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -187,7 
+187,7 @@ define @intrinsic_vsext_vf4_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf4_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsext.vf4 v9, v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -231,7 +231,7 @@ define @intrinsic_vsext_vf4_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf4_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsext.vf4 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -275,7 +275,7 @@ define @intrinsic_vsext_vf4_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf4_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsext.vf4 v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -319,7 +319,7 @@ define @intrinsic_vsext_vf4_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf4_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsext.vf4 v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -363,7 +363,7 @@ define @intrinsic_vsext_vf4_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf4_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsext.vf4 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -407,7 +407,7 @@ define @intrinsic_vsext_vf4_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf4_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsext.vf4 v9, v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -451,7 +451,7 @@ define @intrinsic_vsext_vf4_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf4_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsext.vf4 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -495,7 +495,7 @@ define @intrinsic_vsext_vf4_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf4_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsext.vf4 v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -539,7 +539,7 @@ define @intrinsic_vsext_vf4_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf4_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vsext.vf4 v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -583,7 +583,7 @@ define @intrinsic_vsext_vf2_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsext.vf2 v9, v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -627,7 +627,7 @@ define @intrinsic_vsext_vf2_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; 
CHECK-NEXT: vsext.vf2 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -671,7 +671,7 @@ define @intrinsic_vsext_vf2_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsext.vf2 v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -715,7 +715,7 @@ define @intrinsic_vsext_vf2_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsext.vf2 v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -759,7 +759,7 @@ define @intrinsic_vsext_vf2_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsext.vf2 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -803,7 +803,7 @@ define @intrinsic_vsext_vf2_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsext.vf2 v9, v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -847,7 +847,7 @@ define @intrinsic_vsext_vf2_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsext.vf2 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -891,7 +891,7 @@ define @intrinsic_vsext_vf2_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsext.vf2 v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -935,7 +935,7 @@ define @intrinsic_vsext_vf2_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vsext.vf2 v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -979,7 +979,7 @@ define @intrinsic_vsext_vf2_nxv1i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsext.vf2 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1023,7 +1023,7 @@ define @intrinsic_vsext_vf2_nxv2i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsext.vf2 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1067,7 +1067,7 @@ define @intrinsic_vsext_vf2_nxv4i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsext.vf2 v9, v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -1111,7 +1111,7 @@ define @intrinsic_vsext_vf2_nxv8i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv8i16: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsext.vf2 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1155,7 +1155,7 @@ define @intrinsic_vsext_vf2_nxv16i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vsext.vf2 v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1199,7 +1199,7 @@ define @intrinsic_vsext_vf2_nxv32i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsext_vf2_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vsext.vf2 v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vshl-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vshl-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vshl-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vshl-sdnode.ll @@ -5,7 +5,7 @@ define @vshl_vx_nxv1i8( %va, i8 signext %b) { ; CHECK-LABEL: vshl_vx_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -17,7 +17,7 @@ define @vshl_vx_nxv1i8_0( %va) { ; CHECK-LABEL: vshl_vx_nxv1i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i8 6, i32 0 @@ -29,7 +29,7 @@ define @vshl_vx_nxv2i8( %va, i8 signext %b) { ; CHECK-LABEL: vshl_vx_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -41,7 +41,7 @@ define @vshl_vx_nxv2i8_0( %va) { ; CHECK-LABEL: vshl_vx_nxv2i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i8 6, i32 0 @@ -53,7 +53,7 @@ define @vshl_vx_nxv4i8( %va, i8 signext %b) { ; CHECK-LABEL: vshl_vx_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -65,7 +65,7 @@ define @vshl_vx_nxv4i8_0( %va) { ; CHECK-LABEL: vshl_vx_nxv4i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i8 6, i32 0 @@ -77,7 +77,7 @@ define @vshl_vx_nxv8i8( %va, i8 signext %b) { ; CHECK-LABEL: vshl_vx_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -89,7 +89,7 @@ define @vshl_vx_nxv8i8_0( %va) { ; CHECK-LABEL: vshl_vx_nxv8i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i8 6, i32 0 @@ -101,7 +101,7 @@ define @vshl_vx_nxv16i8( %va, i8 signext %b) { ; CHECK-LABEL: vshl_vx_nxv16i8: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -113,7 +113,7 @@ define @vshl_vx_nxv16i8_0( %va) { ; CHECK-LABEL: vshl_vx_nxv16i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i8 6, i32 0 @@ -125,7 +125,7 @@ define @vshl_vx_nxv32i8( %va, i8 signext %b) { ; CHECK-LABEL: vshl_vx_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -137,7 +137,7 @@ define @vshl_vx_nxv32i8_0( %va) { ; CHECK-LABEL: vshl_vx_nxv32i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i8 6, i32 0 @@ -149,7 +149,7 @@ define @vshl_vx_nxv64i8( %va, i8 signext %b) { ; CHECK-LABEL: vshl_vx_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -161,7 +161,7 @@ define @vshl_vx_nxv64i8_0( %va) { ; CHECK-LABEL: vshl_vx_nxv64i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i8 6, i32 0 @@ -173,7 +173,7 @@ define @vshl_vx_nxv1i16( %va, i16 signext %b) { ; CHECK-LABEL: vshl_vx_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -185,7 +185,7 @@ define @vshl_vx_nxv1i16_0( %va) { ; CHECK-LABEL: vshl_vx_nxv1i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i16 6, i32 0 @@ -197,7 +197,7 @@ define @vshl_vx_nxv2i16( %va, i16 signext %b) { ; CHECK-LABEL: vshl_vx_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -209,7 +209,7 @@ define @vshl_vx_nxv2i16_0( %va) { ; CHECK-LABEL: vshl_vx_nxv2i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i16 6, i32 0 @@ -221,7 +221,7 @@ define @vshl_vx_nxv4i16( %va, i16 signext %b) { ; CHECK-LABEL: vshl_vx_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -233,7 +233,7 @@ define @vshl_vx_nxv4i16_0( %va) { ; CHECK-LABEL: vshl_vx_nxv4i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i16 6, i32 0 @@ -245,7 +245,7 @@ define 
@vshl_vx_nxv8i16( %va, i16 signext %b) { ; CHECK-LABEL: vshl_vx_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -257,7 +257,7 @@ define @vshl_vx_nxv8i16_0( %va) { ; CHECK-LABEL: vshl_vx_nxv8i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i16 6, i32 0 @@ -269,7 +269,7 @@ define @vshl_vx_nxv16i16( %va, i16 signext %b) { ; CHECK-LABEL: vshl_vx_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -281,7 +281,7 @@ define @vshl_vx_nxv16i16_0( %va) { ; CHECK-LABEL: vshl_vx_nxv16i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i16 6, i32 0 @@ -293,7 +293,7 @@ define @vshl_vx_nxv32i16( %va, i16 signext %b) { ; CHECK-LABEL: vshl_vx_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -305,7 +305,7 @@ define @vshl_vx_nxv32i16_0( %va) { ; CHECK-LABEL: vshl_vx_nxv32i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i16 6, i32 0 @@ -317,7 +317,7 @@ define @vshl_vx_nxv1i32( %va, i32 signext %b) { ; CHECK-LABEL: vshl_vx_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -329,7 +329,7 @@ define @vshl_vx_nxv1i32_0( %va) { ; CHECK-LABEL: vshl_vx_nxv1i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 31 ; CHECK-NEXT: ret %head = insertelement poison, i32 31, i32 0 @@ -341,7 +341,7 @@ define @vshl_vx_nxv2i32( %va, i32 signext %b) { ; CHECK-LABEL: vshl_vx_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -353,7 +353,7 @@ define @vshl_vx_nxv2i32_0( %va) { ; CHECK-LABEL: vshl_vx_nxv2i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 31 ; CHECK-NEXT: ret %head = insertelement poison, i32 31, i32 0 @@ -365,7 +365,7 @@ define @vshl_vx_nxv4i32( %va, i32 signext %b) { ; CHECK-LABEL: vshl_vx_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -377,7 +377,7 @@ define @vshl_vx_nxv4i32_0( %va) { ; CHECK-LABEL: vshl_vx_nxv4i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; 
CHECK-NEXT: vsll.vi v8, v8, 31 ; CHECK-NEXT: ret %head = insertelement poison, i32 31, i32 0 @@ -389,7 +389,7 @@ define @vshl_vx_nxv8i32( %va, i32 signext %b) { ; CHECK-LABEL: vshl_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -401,7 +401,7 @@ define @vshl_vx_nxv8i32_0( %va) { ; CHECK-LABEL: vshl_vx_nxv8i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 31 ; CHECK-NEXT: ret %head = insertelement poison, i32 31, i32 0 @@ -413,7 +413,7 @@ define @vshl_vx_nxv16i32( %va, i32 signext %b) { ; CHECK-LABEL: vshl_vx_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -425,7 +425,7 @@ define @vshl_vx_nxv16i32_0( %va) { ; CHECK-LABEL: vshl_vx_nxv16i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 31 ; CHECK-NEXT: ret %head = insertelement poison, i32 31, i32 0 @@ -437,7 +437,7 @@ define @vshl_vx_nxv1i64( %va, i64 %b) { ; CHECK-LABEL: vshl_vx_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -449,7 +449,7 @@ define @vshl_vx_nxv1i64_0( %va) { ; CHECK-LABEL: vshl_vx_nxv1i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 31 ; CHECK-NEXT: ret %head = insertelement poison, i64 31, i32 0 @@ -462,7 +462,7 @@ ; CHECK-LABEL: vshl_vx_nxv1i64_1: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 32, i32 0 @@ -474,7 +474,7 @@ define @vshl_vx_nxv1i64_2( %va) { ; CHECK-LABEL: vshl_vx_nxv1i64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i64 1, i32 0 @@ -486,7 +486,7 @@ define @vshl_vx_nxv2i64( %va, i64 %b) { ; CHECK-LABEL: vshl_vx_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -498,7 +498,7 @@ define @vshl_vx_nxv2i64_0( %va) { ; CHECK-LABEL: vshl_vx_nxv2i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 31 ; CHECK-NEXT: ret %head = insertelement poison, i64 31, i32 0 @@ -511,7 +511,7 @@ ; CHECK-LABEL: vshl_vx_nxv2i64_1: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 32, i32 0 @@ -523,7 +523,7 @@ define @vshl_vx_nxv2i64_2( %va) { ; CHECK-LABEL: vshl_vx_nxv2i64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; 
CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i64 1, i32 0 @@ -535,7 +535,7 @@ define @vshl_vx_nxv4i64( %va, i64 %b) { ; CHECK-LABEL: vshl_vx_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -547,7 +547,7 @@ define @vshl_vx_nxv4i64_0( %va) { ; CHECK-LABEL: vshl_vx_nxv4i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 31 ; CHECK-NEXT: ret %head = insertelement poison, i64 31, i32 0 @@ -560,7 +560,7 @@ ; CHECK-LABEL: vshl_vx_nxv4i64_1: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 32, i32 0 @@ -572,7 +572,7 @@ define @vshl_vx_nxv4i64_2( %va) { ; CHECK-LABEL: vshl_vx_nxv4i64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i64 1, i32 0 @@ -584,7 +584,7 @@ define @vshl_vx_nxv8i64( %va, i64 %b) { ; CHECK-LABEL: vshl_vx_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -596,7 +596,7 @@ define @vshl_vx_nxv8i64_0( %va) { ; CHECK-LABEL: vshl_vx_nxv8i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 31 ; CHECK-NEXT: ret %head = insertelement poison, i64 31, i32 0 @@ -609,7 +609,7 @@ ; CHECK-LABEL: vshl_vx_nxv8i64_1: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 32, i32 0 @@ -621,7 +621,7 @@ define @vshl_vx_nxv8i64_2( %va) { ; CHECK-LABEL: vshl_vx_nxv8i64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i64 1, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll @@ -9,7 +9,7 @@ define @vsll_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vx_nxv8i7: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: li a0, 127 ; CHECK-NEXT: vand.vx v9, v9, a0 @@ -37,7 +37,7 @@ define @vsll_vv_nxv1i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv1i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -61,7 +61,7 @@ define @vsll_vx_nxv1i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vx_nxv1i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; 
CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -87,7 +87,7 @@ define @vsll_vi_nxv1i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_nxv1i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 3, i32 0 @@ -113,7 +113,7 @@ define @vsll_vv_nxv2i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -137,7 +137,7 @@ define @vsll_vx_nxv2i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vx_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -163,7 +163,7 @@ define @vsll_vi_nxv2i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 3, i32 0 @@ -189,7 +189,7 @@ define @vsll_vv_nxv4i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -213,7 +213,7 @@ define @vsll_vx_nxv4i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vx_nxv4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -239,7 +239,7 @@ define @vsll_vi_nxv4i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_nxv4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 3, i32 0 @@ -277,7 +277,7 @@ define @vsll_vv_nxv8i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -301,7 +301,7 @@ define @vsll_vx_nxv8i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vx_nxv8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -327,7 +327,7 @@ define @vsll_vi_nxv8i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_nxv8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 3, i32 0 @@ -353,7 +353,7 @@ define @vsll_vv_nxv16i8_unmasked( %va, %b, i32 zeroext 
%evl) { ; CHECK-LABEL: vsll_vv_nxv16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -377,7 +377,7 @@ define @vsll_vx_nxv16i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vx_nxv16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -403,7 +403,7 @@ define @vsll_vi_nxv16i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_nxv16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 3, i32 0 @@ -429,7 +429,7 @@ define @vsll_vv_nxv32i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv32i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -453,7 +453,7 @@ define @vsll_vx_nxv32i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vx_nxv32i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -479,7 +479,7 @@ define @vsll_vi_nxv32i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_nxv32i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 3, i32 0 @@ -505,7 +505,7 @@ define @vsll_vv_nxv64i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv64i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -529,7 +529,7 @@ define @vsll_vx_nxv64i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vx_nxv64i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -555,7 +555,7 @@ define @vsll_vi_nxv64i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_nxv64i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 3, i32 0 @@ -581,7 +581,7 @@ define @vsll_vv_nxv1i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv1i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -605,7 +605,7 @@ define @vsll_vx_nxv1i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vx_nxv1i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 
; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -631,7 +631,7 @@ define @vsll_vi_nxv1i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_nxv1i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 3, i32 0 @@ -657,7 +657,7 @@ define @vsll_vv_nxv2i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -681,7 +681,7 @@ define @vsll_vx_nxv2i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vx_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -707,7 +707,7 @@ define @vsll_vi_nxv2i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 3, i32 0 @@ -733,7 +733,7 @@ define @vsll_vv_nxv4i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -757,7 +757,7 @@ define @vsll_vx_nxv4i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vx_nxv4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -783,7 +783,7 @@ define @vsll_vi_nxv4i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_nxv4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 3, i32 0 @@ -809,7 +809,7 @@ define @vsll_vv_nxv8i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -833,7 +833,7 @@ define @vsll_vx_nxv8i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vx_nxv8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -859,7 +859,7 @@ define @vsll_vi_nxv8i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_nxv8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 3, i32 0 @@ -885,7 +885,7 @@ define @vsll_vv_nxv16i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: 
vsll_vv_nxv16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -909,7 +909,7 @@ define @vsll_vx_nxv16i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vx_nxv16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -935,7 +935,7 @@ define @vsll_vi_nxv16i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_nxv16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 3, i32 0 @@ -961,7 +961,7 @@ define @vsll_vv_nxv32i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv32i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -985,7 +985,7 @@ define @vsll_vx_nxv32i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vx_nxv32i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -1011,7 +1011,7 @@ define @vsll_vi_nxv32i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_nxv32i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 3, i32 0 @@ -1037,7 +1037,7 @@ define @vsll_vv_nxv1i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv1i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1061,7 +1061,7 @@ define @vsll_vx_nxv1i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vx_nxv1i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1087,7 +1087,7 @@ define @vsll_vi_nxv1i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_nxv1i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 3, i32 0 @@ -1113,7 +1113,7 @@ define @vsll_vv_nxv2i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1137,7 +1137,7 @@ define @vsll_vx_nxv2i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vx_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; 
CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1163,7 +1163,7 @@ define @vsll_vi_nxv2i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 3, i32 0 @@ -1189,7 +1189,7 @@ define @vsll_vv_nxv4i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1213,7 +1213,7 @@ define @vsll_vx_nxv4i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vx_nxv4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1239,7 +1239,7 @@ define @vsll_vi_nxv4i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_nxv4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 3, i32 0 @@ -1265,7 +1265,7 @@ define @vsll_vv_nxv8i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1289,7 +1289,7 @@ define @vsll_vx_nxv8i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vx_nxv8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1315,7 +1315,7 @@ define @vsll_vi_nxv8i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_nxv8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 3, i32 0 @@ -1341,7 +1341,7 @@ define @vsll_vv_nxv16i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1365,7 +1365,7 @@ define @vsll_vx_nxv16i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vx_nxv16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1391,7 +1391,7 @@ define @vsll_vi_nxv16i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_nxv16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 3, i32 0 @@ -1417,7 +1417,7 @@ define @vsll_vv_nxv1i64_unmasked( 
%va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv1i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1447,13 +1447,13 @@ define @vsll_vx_nxv1i64_unmasked( %va, i64 %b, i32 zeroext %evl) { ; RV32-LABEL: vsll_vx_nxv1i64_unmasked: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vsll.vx v8, v8, a0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsll_vx_nxv1i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vsll.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1479,7 +1479,7 @@ define @vsll_vi_nxv1i64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_nxv1i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 3, i32 0 @@ -1505,7 +1505,7 @@ define @vsll_vv_nxv2i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1535,13 +1535,13 @@ define @vsll_vx_nxv2i64_unmasked( %va, i64 %b, i32 zeroext %evl) { ; RV32-LABEL: vsll_vx_nxv2i64_unmasked: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vsll.vx v8, v8, a0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsll_vx_nxv2i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vsll.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1567,7 +1567,7 @@ define @vsll_vi_nxv2i64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 3, i32 0 @@ -1593,7 +1593,7 @@ define @vsll_vv_nxv4i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1623,13 +1623,13 @@ define @vsll_vx_nxv4i64_unmasked( %va, i64 %b, i32 zeroext %evl) { ; RV32-LABEL: vsll_vx_nxv4i64_unmasked: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vsll.vx v8, v8, a0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsll_vx_nxv4i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vsll.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1655,7 +1655,7 @@ define @vsll_vi_nxv4i64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_nxv4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: ret 
%elt.head = insertelement poison, i64 3, i32 0 @@ -1681,7 +1681,7 @@ define @vsll_vv_nxv8i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vv_nxv8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1711,13 +1711,13 @@ define @vsll_vx_nxv8i64_unmasked( %va, i64 %b, i32 zeroext %evl) { ; RV32-LABEL: vsll_vx_nxv8i64_unmasked: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vsll.vx v8, v8, a0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsll_vx_nxv8i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsll.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1743,7 +1743,7 @@ define @vsll_vi_nxv8i64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsll_vi_nxv8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 3 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 3, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp-mask.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp-mask.ll @@ -20,7 +20,7 @@ define @vsitofp_nxv2f16_nxv2i1_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2f16_nxv2i1_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 @@ -47,7 +47,7 @@ define @vsitofp_nxv2f32_nxv2i1_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2f32_nxv2i1_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 @@ -74,7 +74,7 @@ define @vsitofp_nxv2f64_nxv2i1_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2f64_nxv2i1_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, -1, v0 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll @@ -7,7 +7,7 @@ define @vsitofp_nxv2f16_nxv2i7( %va, %m, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2f16_nxv2i7: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v8 ; CHECK-NEXT: vsra.vi v9, v8, 1 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu @@ -33,7 +33,7 @@ define @vsitofp_nxv2f16_nxv2i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2f16_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -56,7 +56,7 @@ define @vsitofp_nxv2f16_nxv2i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2f16_nxv2i16_unmasked: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.sitofp.nxv2f16.nxv2i16( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) @@ -79,7 +79,7 @@ define @vsitofp_nxv2f16_nxv2i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2f16_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.x.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -104,9 +104,9 @@ define @vsitofp_nxv2f16_nxv2i64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2f16_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.f.x.w v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v8, v10 ; CHECK-NEXT: ret %v = call @llvm.vp.sitofp.nxv2f16.nxv2i64( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) @@ -129,7 +129,7 @@ define @vsitofp_nxv2f32_nxv2i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2f32_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsext.vf2 v9, v8 ; CHECK-NEXT: vfwcvt.f.x.v v8, v9 ; CHECK-NEXT: ret @@ -153,7 +153,7 @@ define @vsitofp_nxv2f32_nxv2i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2f32_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -176,7 +176,7 @@ define @vsitofp_nxv2f32_nxv2i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2f32_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.sitofp.nxv2f32.nxv2i32( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) @@ -199,7 +199,7 @@ define @vsitofp_nxv2f32_nxv2i64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2f32_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.f.x.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -223,7 +223,7 @@ define @vsitofp_nxv2f64_nxv2i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2f64_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsext.vf4 v10, v8 ; CHECK-NEXT: vfwcvt.f.x.v v8, v10 ; CHECK-NEXT: ret @@ -247,7 +247,7 @@ define @vsitofp_nxv2f64_nxv2i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2f64_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsext.vf2 v10, v8 ; CHECK-NEXT: vfwcvt.f.x.v v8, v10 ; CHECK-NEXT: ret @@ -271,7 +271,7 @@ define @vsitofp_nxv2f64_nxv2i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2f64_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, 
a0, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.f.x.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -294,7 +294,7 @@ define @vsitofp_nxv2f64_nxv2i64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsitofp_nxv2f64_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.sitofp.nxv2f64.nxv2i64( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) @@ -317,7 +317,7 @@ ; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a4, a1, 2 -; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma ; CHECK-NEXT: slli a1, a1, 1 ; CHECK-NEXT: sub a3, a0, a1 ; CHECK-NEXT: vslidedown.vx v0, v0, a4 @@ -354,7 +354,7 @@ ; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a4, a1, 2 -; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma ; CHECK-NEXT: slli a1, a1, 1 ; CHECK-NEXT: sub a3, a0, a1 ; CHECK-NEXT: vslidedown.vx v0, v0, a4 @@ -387,14 +387,14 @@ ; CHECK-NEXT: mv a2, a1 ; CHECK-NEXT: .LBB27_2: ; CHECK-NEXT: li a3, 0 -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; CHECK-NEXT: sub a1, a0, a1 ; CHECK-NEXT: vfcvt.f.x.v v8, v8 ; CHECK-NEXT: bltu a0, a1, .LBB27_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: mv a3, a1 ; CHECK-NEXT: .LBB27_4: -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; CHECK-NEXT: vfcvt.f.x.v v16, v16 ; CHECK-NEXT: ret %v = call @llvm.vp.sitofp.nxv32f32.nxv32i32( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1down-constant-vl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1down-constant-vl-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vslide1down-constant-vl-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslide1down-constant-vl-rv32.ll @@ -20,7 +20,7 @@ define @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl1( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: vslide1down.vx v8, v8, a1 ; CHECK-NEXT: ret @@ -37,14 +37,14 @@ define @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2( %0, i64 %1) nounwind { ; CHECK-128-65536-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2: ; CHECK-128-65536: # %bb.0: # %entry -; CHECK-128-65536-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-128-65536-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a1 ; CHECK-128-65536-NEXT: ret ; ; CHECK-512-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2: ; CHECK-512: # %bb.0: # %entry -; CHECK-512-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-512-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-512-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-512-NEXT: vslide1down.vx v8, v8, a1 ; CHECK-512-NEXT: ret @@ -70,14 +70,14 @@ ; CHECK-128-65536: # %bb.0: # %entry ; CHECK-128-65536-NEXT: vsetivli a2, 3, e64, m1, ta, mu ; CHECK-128-65536-NEXT: slli a2, a2, 1 -; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a0 ; 
CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a1 ; CHECK-128-65536-NEXT: ret ; ; CHECK-512-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl3: ; CHECK-512: # %bb.0: # %entry -; CHECK-512-NEXT: vsetivli zero, 6, e32, m1, ta, mu +; CHECK-512-NEXT: vsetivli zero, 6, e32, m1, ta, ma ; CHECK-512-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-512-NEXT: vslide1down.vx v8, v8, a1 ; CHECK-512-NEXT: ret @@ -103,14 +103,14 @@ ; CHECK-128-65536: # %bb.0: # %entry ; CHECK-128-65536-NEXT: vsetivli a2, 8, e64, m1, ta, mu ; CHECK-128-65536-NEXT: slli a2, a2, 1 -; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a1 ; CHECK-128-65536-NEXT: ret ; ; CHECK-512-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl8: ; CHECK-512: # %bb.0: # %entry -; CHECK-512-NEXT: vsetivli zero, 16, e32, m1, ta, mu +; CHECK-512-NEXT: vsetivli zero, 16, e32, m1, ta, ma ; CHECK-512-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-512-NEXT: vslide1down.vx v8, v8, a1 ; CHECK-512-NEXT: ret @@ -136,7 +136,7 @@ ; CHECK-128-65536: # %bb.0: # %entry ; CHECK-128-65536-NEXT: vsetivli a2, 9, e64, m1, ta, mu ; CHECK-128-65536-NEXT: slli a2, a2, 1 -; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a1 ; CHECK-128-65536-NEXT: ret @@ -145,7 +145,7 @@ ; CHECK-512: # %bb.0: # %entry ; CHECK-512-NEXT: vsetivli a2, 9, e64, m1, ta, mu ; CHECK-512-NEXT: slli a2, a2, 1 -; CHECK-512-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-512-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-512-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-512-NEXT: vslide1down.vx v8, v8, a1 ; CHECK-512-NEXT: ret @@ -171,7 +171,7 @@ ; CHECK-128-65536: # %bb.0: # %entry ; CHECK-128-65536-NEXT: vsetivli a2, 15, e64, m1, ta, mu ; CHECK-128-65536-NEXT: slli a2, a2, 1 -; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a1 ; CHECK-128-65536-NEXT: ret @@ -180,7 +180,7 @@ ; CHECK-512: # %bb.0: # %entry ; CHECK-512-NEXT: vsetivli a2, 15, e64, m1, ta, mu ; CHECK-512-NEXT: slli a2, a2, 1 -; CHECK-512-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-512-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-512-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-512-NEXT: vslide1down.vx v8, v8, a1 ; CHECK-512-NEXT: ret @@ -206,7 +206,7 @@ ; CHECK-128-65536: # %bb.0: # %entry ; CHECK-128-65536-NEXT: vsetivli a2, 16, e64, m1, ta, mu ; CHECK-128-65536-NEXT: slli a2, a2, 1 -; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a1 ; CHECK-128-65536-NEXT: ret @@ -240,7 +240,7 @@ ; CHECK-128-65536-NEXT: li a2, 2047 ; CHECK-128-65536-NEXT: vsetvli a2, a2, e64, m1, ta, mu ; CHECK-128-65536-NEXT: slli a2, a2, 1 -; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a1 ; CHECK-128-65536-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vslide1down_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vslide1down_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vslide1down_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vslide1down_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vslide1down_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vslide1down_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vslide1down_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -339,7 +339,7 @@ define @intrinsic_vslide1down_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -386,7 +386,7 @@ define @intrinsic_vslide1down_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -433,7 +433,7 @@ define @intrinsic_vslide1down_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma 
; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -480,7 +480,7 @@ define @intrinsic_vslide1down_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -527,7 +527,7 @@ define @intrinsic_vslide1down_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -574,7 +574,7 @@ define @intrinsic_vslide1down_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -621,7 +621,7 @@ define @intrinsic_vslide1down_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -668,7 +668,7 @@ define @intrinsic_vslide1down_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -715,7 +715,7 @@ define @intrinsic_vslide1down_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -762,7 +762,7 @@ define @intrinsic_vslide1down_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -809,7 +809,7 @@ define @intrinsic_vslide1down_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -858,7 +858,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, a2, e64, m1, ta, mu ; CHECK-NEXT: slli a2, a2, 1 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: vslide1down.vx v8, v8, a1 ; CHECK-NEXT: ret @@ -885,10 +885,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, a2, e64, m1, ta, mu ; CHECK-NEXT: slli a3, a3, 1 -; CHECK-NEXT: vsetvli zero, a3, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e32, m1, ta, ma ; CHECK-NEXT: vslide1down.vx v9, v9, a0 ; CHECK-NEXT: vslide1down.vx v9, v9, a1 -; 
CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -913,7 +913,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, a2, e64, m2, ta, mu ; CHECK-NEXT: slli a2, a2, 1 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: vslide1down.vx v8, v8, a1 ; CHECK-NEXT: ret @@ -940,10 +940,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, a2, e64, m2, ta, mu ; CHECK-NEXT: slli a3, a3, 1 -; CHECK-NEXT: vsetvli zero, a3, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e32, m2, ta, ma ; CHECK-NEXT: vslide1down.vx v10, v10, a0 ; CHECK-NEXT: vslide1down.vx v10, v10, a1 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0 ; CHECK-NEXT: ret entry: @@ -968,7 +968,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, a2, e64, m4, ta, mu ; CHECK-NEXT: slli a2, a2, 1 -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: vslide1down.vx v8, v8, a1 ; CHECK-NEXT: ret @@ -995,10 +995,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, a2, e64, m4, ta, mu ; CHECK-NEXT: slli a3, a3, 1 -; CHECK-NEXT: vsetvli zero, a3, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e32, m4, ta, ma ; CHECK-NEXT: vslide1down.vx v12, v12, a0 ; CHECK-NEXT: vslide1down.vx v12, v12, a1 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0 ; CHECK-NEXT: ret entry: @@ -1023,7 +1023,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, a2, e64, m8, ta, mu ; CHECK-NEXT: slli a2, a2, 1 -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: vslide1down.vx v8, v8, a1 ; CHECK-NEXT: ret @@ -1050,10 +1050,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, a2, e64, m8, ta, mu ; CHECK-NEXT: slli a3, a3, 1 -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; CHECK-NEXT: vslide1down.vx v16, v16, a0 ; CHECK-NEXT: vslide1down.vx v16, v16, a1 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vslide1down_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vslide1down_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vslide1down_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; 
CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vslide1down_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vslide1down_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vslide1down_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vslide1down_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -339,7 +339,7 @@ define @intrinsic_vslide1down_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -386,7 +386,7 @@ define @intrinsic_vslide1down_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -433,7 +433,7 @@ define @intrinsic_vslide1down_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -480,7 +480,7 @@ define @intrinsic_vslide1down_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -527,7 +527,7 @@ define @intrinsic_vslide1down_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -574,7 +574,7 @@ define @intrinsic_vslide1down_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { ; 
CHECK-LABEL: intrinsic_vslide1down_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -621,7 +621,7 @@ define @intrinsic_vslide1down_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -668,7 +668,7 @@ define @intrinsic_vslide1down_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -715,7 +715,7 @@ define @intrinsic_vslide1down_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -762,7 +762,7 @@ define @intrinsic_vslide1down_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -809,7 +809,7 @@ define @intrinsic_vslide1down_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -856,7 +856,7 @@ define @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -903,7 +903,7 @@ define @intrinsic_vslide1down_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -950,7 +950,7 @@ define @intrinsic_vslide1down_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -997,7 +997,7 @@ define @intrinsic_vslide1down_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vslide1down.vx v8, v8, a0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1up-constant-vl-rv32.ll 
b/llvm/test/CodeGen/RISCV/rvv/vslide1up-constant-vl-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vslide1up-constant-vl-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslide1up-constant-vl-rv32.ll @@ -20,7 +20,7 @@ define @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl1( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma ; CHECK-NEXT: vslide1up.vx v9, v8, a1 ; CHECK-NEXT: vslide1up.vx v8, v9, a0 ; CHECK-NEXT: ret @@ -37,14 +37,14 @@ define @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl2( %0, i64 %1) nounwind { ; CHECK-128-65536-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl2: ; CHECK-128-65536: # %bb.0: # %entry -; CHECK-128-65536-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-128-65536-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-128-65536-NEXT: vslide1up.vx v9, v8, a1 ; CHECK-128-65536-NEXT: vslide1up.vx v8, v9, a0 ; CHECK-128-65536-NEXT: ret ; ; CHECK-512-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl2: ; CHECK-512: # %bb.0: # %entry -; CHECK-512-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-512-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-512-NEXT: vslide1up.vx v9, v8, a1 ; CHECK-512-NEXT: vslide1up.vx v8, v9, a0 ; CHECK-512-NEXT: ret @@ -70,14 +70,14 @@ ; CHECK-128-65536: # %bb.0: # %entry ; CHECK-128-65536-NEXT: vsetivli a2, 3, e64, m1, ta, mu ; CHECK-128-65536-NEXT: slli a2, a2, 1 -; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-128-65536-NEXT: vslide1up.vx v9, v8, a1 ; CHECK-128-65536-NEXT: vslide1up.vx v8, v9, a0 ; CHECK-128-65536-NEXT: ret ; ; CHECK-512-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl3: ; CHECK-512: # %bb.0: # %entry -; CHECK-512-NEXT: vsetivli zero, 6, e32, m1, ta, mu +; CHECK-512-NEXT: vsetivli zero, 6, e32, m1, ta, ma ; CHECK-512-NEXT: vslide1up.vx v9, v8, a1 ; CHECK-512-NEXT: vslide1up.vx v8, v9, a0 ; CHECK-512-NEXT: ret @@ -103,14 +103,14 @@ ; CHECK-128-65536: # %bb.0: # %entry ; CHECK-128-65536-NEXT: vsetivli a2, 8, e64, m1, ta, mu ; CHECK-128-65536-NEXT: slli a2, a2, 1 -; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-128-65536-NEXT: vslide1up.vx v9, v8, a1 ; CHECK-128-65536-NEXT: vslide1up.vx v8, v9, a0 ; CHECK-128-65536-NEXT: ret ; ; CHECK-512-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl8: ; CHECK-512: # %bb.0: # %entry -; CHECK-512-NEXT: vsetivli zero, 16, e32, m1, ta, mu +; CHECK-512-NEXT: vsetivli zero, 16, e32, m1, ta, ma ; CHECK-512-NEXT: vslide1up.vx v9, v8, a1 ; CHECK-512-NEXT: vslide1up.vx v8, v9, a0 ; CHECK-512-NEXT: ret @@ -136,7 +136,7 @@ ; CHECK-128-65536: # %bb.0: # %entry ; CHECK-128-65536-NEXT: vsetivli a2, 9, e64, m1, ta, mu ; CHECK-128-65536-NEXT: slli a2, a2, 1 -; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-128-65536-NEXT: vslide1up.vx v9, v8, a1 ; CHECK-128-65536-NEXT: vslide1up.vx v8, v9, a0 ; CHECK-128-65536-NEXT: ret @@ -145,7 +145,7 @@ ; CHECK-512: # %bb.0: # %entry ; CHECK-512-NEXT: vsetivli a2, 9, e64, m1, ta, mu ; CHECK-512-NEXT: slli a2, a2, 1 -; CHECK-512-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-512-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-512-NEXT: vslide1up.vx v9, v8, a1 ; CHECK-512-NEXT: vslide1up.vx v8, v9, a0 ; CHECK-512-NEXT: ret @@ -171,7 +171,7 @@ ; CHECK-128-65536: # %bb.0: # %entry ; 
CHECK-128-65536-NEXT: vsetivli a2, 15, e64, m1, ta, mu ; CHECK-128-65536-NEXT: slli a2, a2, 1 -; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-128-65536-NEXT: vslide1up.vx v9, v8, a1 ; CHECK-128-65536-NEXT: vslide1up.vx v8, v9, a0 ; CHECK-128-65536-NEXT: ret @@ -180,7 +180,7 @@ ; CHECK-512: # %bb.0: # %entry ; CHECK-512-NEXT: vsetivli a2, 15, e64, m1, ta, mu ; CHECK-512-NEXT: slli a2, a2, 1 -; CHECK-512-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-512-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-512-NEXT: vslide1up.vx v9, v8, a1 ; CHECK-512-NEXT: vslide1up.vx v8, v9, a0 ; CHECK-512-NEXT: ret @@ -206,7 +206,7 @@ ; CHECK-128-65536: # %bb.0: # %entry ; CHECK-128-65536-NEXT: vsetivli a2, 16, e64, m1, ta, mu ; CHECK-128-65536-NEXT: slli a2, a2, 1 -; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-128-65536-NEXT: vslide1up.vx v9, v8, a1 ; CHECK-128-65536-NEXT: vslide1up.vx v8, v9, a0 ; CHECK-128-65536-NEXT: ret @@ -240,7 +240,7 @@ ; CHECK-128-65536-NEXT: li a2, 2047 ; CHECK-128-65536-NEXT: vsetvli a2, a2, e64, m1, ta, mu ; CHECK-128-65536-NEXT: slli a2, a2, 1 -; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-128-65536-NEXT: vslide1up.vx v9, v8, a1 ; CHECK-128-65536-NEXT: vslide1up.vx v8, v9, a0 ; CHECK-128-65536-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vslide1up_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vslide1up.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -58,7 +58,7 @@ define @intrinsic_vslide1up_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vslide1up.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -106,7 +106,7 @@ define @intrinsic_vslide1up_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vslide1up.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -154,7 +154,7 @@ define @intrinsic_vslide1up_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vslide1up.vx v9, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -202,7 +202,7 @@ define @intrinsic_vslide1up_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vslide1up.vx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -250,7 +250,7 @@ define 
@intrinsic_vslide1up_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vslide1up.vx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -298,7 +298,7 @@ define @intrinsic_vslide1up_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vslide1up.vx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -346,7 +346,7 @@ define @intrinsic_vslide1up_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vslide1up.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -394,7 +394,7 @@ define @intrinsic_vslide1up_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vslide1up.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -442,7 +442,7 @@ define @intrinsic_vslide1up_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vslide1up.vx v9, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -490,7 +490,7 @@ define @intrinsic_vslide1up_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vslide1up.vx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -538,7 +538,7 @@ define @intrinsic_vslide1up_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vslide1up.vx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -586,7 +586,7 @@ define @intrinsic_vslide1up_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vslide1up.vx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -634,7 +634,7 @@ define @intrinsic_vslide1up_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vslide1up.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -682,7 +682,7 @@ define @intrinsic_vslide1up_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vslide1up.vx v9, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -730,7 +730,7 @@ define @intrinsic_vslide1up_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vslide1up.vx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -778,7 +778,7 @@ define @intrinsic_vslide1up_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vslide1up.vx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -826,7 +826,7 @@ define @intrinsic_vslide1up_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vslide1up.vx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -876,7 +876,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, a2, e64, m1, ta, mu ; CHECK-NEXT: slli a2, a2, 1 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vslide1up.vx v9, v8, a1 ; CHECK-NEXT: vslide1up.vx v8, v9, a0 ; CHECK-NEXT: ret @@ -903,10 +903,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, a2, e64, m1, ta, mu ; CHECK-NEXT: slli a3, a3, 1 -; CHECK-NEXT: vsetvli zero, a3, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e32, m1, ta, ma ; CHECK-NEXT: vslide1up.vx v10, v9, a1 ; CHECK-NEXT: vslide1up.vx v9, v10, a0 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0 ; CHECK-NEXT: ret entry: @@ -931,7 +931,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, a2, e64, m2, ta, mu ; CHECK-NEXT: slli a2, a2, 1 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vslide1up.vx v10, v8, a1 ; CHECK-NEXT: vslide1up.vx v8, v10, a0 ; CHECK-NEXT: ret @@ -958,10 +958,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, a2, e64, m2, ta, mu ; CHECK-NEXT: slli a3, a3, 1 -; CHECK-NEXT: vsetvli zero, a3, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e32, m2, ta, ma ; CHECK-NEXT: vslide1up.vx v12, v10, a1 ; CHECK-NEXT: vslide1up.vx v10, v12, a0 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0 ; CHECK-NEXT: ret entry: @@ -986,7 +986,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, a2, e64, m4, ta, mu ; CHECK-NEXT: slli a2, a2, 1 -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma ; CHECK-NEXT: vslide1up.vx v12, v8, a1 ; CHECK-NEXT: vslide1up.vx v8, v12, a0 ; CHECK-NEXT: ret @@ -1013,10 +1013,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, a2, e64, m4, ta, mu ; CHECK-NEXT: slli a3, a3, 1 -; CHECK-NEXT: vsetvli zero, a3, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e32, m4, ta, ma ; CHECK-NEXT: vslide1up.vx v16, v12, a1 ; CHECK-NEXT: vslide1up.vx v12, v16, a0 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, 
ma ; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0 ; CHECK-NEXT: ret entry: @@ -1041,7 +1041,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a2, a2, e64, m8, ta, mu ; CHECK-NEXT: slli a2, a2, 1 -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; CHECK-NEXT: vslide1up.vx v16, v8, a1 ; CHECK-NEXT: vslide1up.vx v8, v16, a0 ; CHECK-NEXT: ret @@ -1068,10 +1068,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli a3, a2, e64, m8, ta, mu ; CHECK-NEXT: slli a3, a3, 1 -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; CHECK-NEXT: vslide1up.vx v24, v16, a1 ; CHECK-NEXT: vslide1up.vx v16, v24, a0 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vslide1up_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vslide1up.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -58,7 +58,7 @@ define @intrinsic_vslide1up_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vslide1up.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -106,7 +106,7 @@ define @intrinsic_vslide1up_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vslide1up.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -154,7 +154,7 @@ define @intrinsic_vslide1up_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vslide1up.vx v9, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -202,7 +202,7 @@ define @intrinsic_vslide1up_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vslide1up.vx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -250,7 +250,7 @@ define @intrinsic_vslide1up_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vslide1up.vx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -298,7 +298,7 @@ define @intrinsic_vslide1up_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: 
vslide1up.vx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -346,7 +346,7 @@ define @intrinsic_vslide1up_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vslide1up.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -394,7 +394,7 @@ define @intrinsic_vslide1up_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vslide1up.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -442,7 +442,7 @@ define @intrinsic_vslide1up_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vslide1up.vx v9, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -490,7 +490,7 @@ define @intrinsic_vslide1up_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vslide1up.vx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -538,7 +538,7 @@ define @intrinsic_vslide1up_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vslide1up.vx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -586,7 +586,7 @@ define @intrinsic_vslide1up_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vslide1up.vx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -634,7 +634,7 @@ define @intrinsic_vslide1up_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vslide1up.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -682,7 +682,7 @@ define @intrinsic_vslide1up_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vslide1up.vx v9, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -730,7 +730,7 @@ define @intrinsic_vslide1up_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vslide1up.vx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -778,7 +778,7 @@ define @intrinsic_vslide1up_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; 
CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vslide1up.vx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -826,7 +826,7 @@ define @intrinsic_vslide1up_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vslide1up.vx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -874,7 +874,7 @@ define @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vslide1up.vx v9, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -922,7 +922,7 @@ define @intrinsic_vslide1up_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vslide1up.vx v10, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -970,7 +970,7 @@ define @intrinsic_vslide1up_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vslide1up.vx v12, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1018,7 +1018,7 @@ define @intrinsic_vslide1up_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vslide1up.vx v16, v8, a0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll @@ -12,7 +12,7 @@ define @intrinsic_vslidedown_vx_nxv1i8_nxv1i8( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -53,7 +53,7 @@ define @intrinsic_vslidedown_vi_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -95,7 +95,7 @@ define @intrinsic_vslidedown_vx_nxv2i8_nxv2i8( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -136,7 +136,7 @@ define @intrinsic_vslidedown_vi_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -178,7 +178,7 @@ define @intrinsic_vslidedown_vx_nxv4i8_nxv4i8( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -219,7 +219,7 @@ define @intrinsic_vslidedown_vi_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -261,7 +261,7 @@ define @intrinsic_vslidedown_vx_nxv8i8_nxv8i8( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -302,7 +302,7 @@ define @intrinsic_vslidedown_vi_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -344,7 +344,7 @@ define @intrinsic_vslidedown_vx_nxv16i8_nxv16i8( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -385,7 +385,7 @@ define @intrinsic_vslidedown_vi_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -427,7 +427,7 @@ define @intrinsic_vslidedown_vx_nxv32i8_nxv32i8( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -468,7 +468,7 @@ define @intrinsic_vslidedown_vi_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -510,7 +510,7 @@ define @intrinsic_vslidedown_vx_nxv1i16_nxv1i16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -551,7 +551,7 @@ define @intrinsic_vslidedown_vi_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, 
v9, 9 ; CHECK-NEXT: ret entry: @@ -593,7 +593,7 @@ define @intrinsic_vslidedown_vx_nxv2i16_nxv2i16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -634,7 +634,7 @@ define @intrinsic_vslidedown_vi_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -676,7 +676,7 @@ define @intrinsic_vslidedown_vx_nxv4i16_nxv4i16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vslidedown_vi_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -759,7 +759,7 @@ define @intrinsic_vslidedown_vx_nxv8i16_nxv8i16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -800,7 +800,7 @@ define @intrinsic_vslidedown_vi_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -842,7 +842,7 @@ define @intrinsic_vslidedown_vx_nxv16i16_nxv16i16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -883,7 +883,7 @@ define @intrinsic_vslidedown_vi_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -925,7 +925,7 @@ define @intrinsic_vslidedown_vx_nxv1i32_nxv1i32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -966,7 +966,7 @@ define @intrinsic_vslidedown_vi_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1008,7 +1008,7 @@ define 
@intrinsic_vslidedown_vx_nxv2i32_nxv2i32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1049,7 +1049,7 @@ define @intrinsic_vslidedown_vi_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1091,7 +1091,7 @@ define @intrinsic_vslidedown_vx_nxv4i32_nxv4i32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -1132,7 +1132,7 @@ define @intrinsic_vslidedown_vi_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -1174,7 +1174,7 @@ define @intrinsic_vslidedown_vx_nxv8i32_nxv8i32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -1215,7 +1215,7 @@ define @intrinsic_vslidedown_vi_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -1257,7 +1257,7 @@ define @intrinsic_vslidedown_vx_nxv1i64_nxv1i64( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1298,7 +1298,7 @@ define @intrinsic_vslidedown_vi_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1340,7 +1340,7 @@ define @intrinsic_vslidedown_vx_nxv2i64_nxv2i64( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -1381,7 +1381,7 @@ define @intrinsic_vslidedown_vi_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -1423,7 +1423,7 @@ define @intrinsic_vslidedown_vx_nxv4i64_nxv4i64( %0, %1, i32 %2, i32 %3) nounwind { ; 
CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -1464,7 +1464,7 @@ define @intrinsic_vslidedown_vi_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -1506,7 +1506,7 @@ define @intrinsic_vslidedown_vx_nxv1f16_nxv1f16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1547,7 +1547,7 @@ define @intrinsic_vslidedown_vi_nxv1f16_nxv1f16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1589,7 +1589,7 @@ define @intrinsic_vslidedown_vx_nxv2f16_nxv2f16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1630,7 +1630,7 @@ define @intrinsic_vslidedown_vi_nxv2f16_nxv2f16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1672,7 +1672,7 @@ define @intrinsic_vslidedown_vx_nxv4f16_nxv4f16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1713,7 +1713,7 @@ define @intrinsic_vslidedown_vi_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1755,7 +1755,7 @@ define @intrinsic_vslidedown_vx_nxv8f16_nxv8f16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -1796,7 +1796,7 @@ define @intrinsic_vslidedown_vi_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -1838,7 +1838,7 @@ define @intrinsic_vslidedown_vx_nxv16f16_nxv16f16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -1879,7 +1879,7 @@ define @intrinsic_vslidedown_vi_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -1921,7 +1921,7 @@ define @intrinsic_vslidedown_vx_nxv1f32_nxv1f32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1962,7 +1962,7 @@ define @intrinsic_vslidedown_vi_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -2004,7 +2004,7 @@ define @intrinsic_vslidedown_vx_nxv2f32_nxv2f32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -2045,7 +2045,7 @@ define @intrinsic_vslidedown_vi_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -2087,7 +2087,7 @@ define @intrinsic_vslidedown_vx_nxv4f32_nxv4f32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -2128,7 +2128,7 @@ define @intrinsic_vslidedown_vi_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -2170,7 +2170,7 @@ define @intrinsic_vslidedown_vx_nxv8f32_nxv8f32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -2211,7 +2211,7 @@ define @intrinsic_vslidedown_vi_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -2253,7 +2253,7 @@ define @intrinsic_vslidedown_vx_nxv1f64_nxv1f64( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: 
vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -2294,7 +2294,7 @@ define @intrinsic_vslidedown_vi_nxv1f64_nxv1f64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -2336,7 +2336,7 @@ define @intrinsic_vslidedown_vx_nxv2f64_nxv2f64( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -2377,7 +2377,7 @@ define @intrinsic_vslidedown_vi_nxv2f64_nxv2f64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -2419,7 +2419,7 @@ define @intrinsic_vslidedown_vx_nxv4f64_nxv4f64( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -2460,7 +2460,7 @@ define @intrinsic_vslidedown_vi_nxv4f64_nxv4f64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v12, 9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll @@ -12,7 +12,7 @@ define @intrinsic_vslidedown_vx_nxv1i8_nxv1i8( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -53,7 +53,7 @@ define @intrinsic_vslidedown_vi_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -95,7 +95,7 @@ define @intrinsic_vslidedown_vx_nxv2i8_nxv2i8( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -136,7 +136,7 @@ define @intrinsic_vslidedown_vi_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -178,7 +178,7 @@ define @intrinsic_vslidedown_vx_nxv4i8_nxv4i8( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vslidedown_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -219,7 +219,7 @@ define @intrinsic_vslidedown_vi_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -261,7 +261,7 @@ define @intrinsic_vslidedown_vx_nxv8i8_nxv8i8( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -302,7 +302,7 @@ define @intrinsic_vslidedown_vi_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -344,7 +344,7 @@ define @intrinsic_vslidedown_vx_nxv16i8_nxv16i8( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -385,7 +385,7 @@ define @intrinsic_vslidedown_vi_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -427,7 +427,7 @@ define @intrinsic_vslidedown_vx_nxv32i8_nxv32i8( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -468,7 +468,7 @@ define @intrinsic_vslidedown_vi_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -510,7 +510,7 @@ define @intrinsic_vslidedown_vx_nxv1i16_nxv1i16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -551,7 +551,7 @@ define @intrinsic_vslidedown_vi_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -593,7 +593,7 @@ define @intrinsic_vslidedown_vx_nxv2i16_nxv2i16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; 
CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -634,7 +634,7 @@ define @intrinsic_vslidedown_vi_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -676,7 +676,7 @@ define @intrinsic_vslidedown_vx_nxv4i16_nxv4i16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vslidedown_vi_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -759,7 +759,7 @@ define @intrinsic_vslidedown_vx_nxv8i16_nxv8i16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -800,7 +800,7 @@ define @intrinsic_vslidedown_vi_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -842,7 +842,7 @@ define @intrinsic_vslidedown_vx_nxv16i16_nxv16i16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -883,7 +883,7 @@ define @intrinsic_vslidedown_vi_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -925,7 +925,7 @@ define @intrinsic_vslidedown_vx_nxv1i32_nxv1i32( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -966,7 +966,7 @@ define @intrinsic_vslidedown_vi_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1008,7 +1008,7 @@ define @intrinsic_vslidedown_vx_nxv2i32_nxv2i32( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; 
CHECK-NEXT: ret entry: @@ -1049,7 +1049,7 @@ define @intrinsic_vslidedown_vi_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1091,7 +1091,7 @@ define @intrinsic_vslidedown_vx_nxv4i32_nxv4i32( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -1132,7 +1132,7 @@ define @intrinsic_vslidedown_vi_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -1174,7 +1174,7 @@ define @intrinsic_vslidedown_vx_nxv8i32_nxv8i32( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -1215,7 +1215,7 @@ define @intrinsic_vslidedown_vi_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -1257,7 +1257,7 @@ define @intrinsic_vslidedown_vx_nxv1i64_nxv1i64( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1298,7 +1298,7 @@ define @intrinsic_vslidedown_vi_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1340,7 +1340,7 @@ define @intrinsic_vslidedown_vx_nxv2i64_nxv2i64( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -1381,7 +1381,7 @@ define @intrinsic_vslidedown_vi_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -1423,7 +1423,7 @@ define @intrinsic_vslidedown_vx_nxv4i64_nxv4i64( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -1464,7 +1464,7 @@ define 
@intrinsic_vslidedown_vi_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -1506,7 +1506,7 @@ define @intrinsic_vslidedown_vx_nxv1f16_nxv1f16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1547,7 +1547,7 @@ define @intrinsic_vslidedown_vi_nxv1f16_nxv1f16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1589,7 +1589,7 @@ define @intrinsic_vslidedown_vx_nxv2f16_nxv2f16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1630,7 +1630,7 @@ define @intrinsic_vslidedown_vi_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1672,7 +1672,7 @@ define @intrinsic_vslidedown_vx_nxv4f16_nxv4f16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1713,7 +1713,7 @@ define @intrinsic_vslidedown_vi_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1755,7 +1755,7 @@ define @intrinsic_vslidedown_vx_nxv8f16_nxv8f16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -1796,7 +1796,7 @@ define @intrinsic_vslidedown_vi_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -1838,7 +1838,7 @@ define @intrinsic_vslidedown_vx_nxv16f16_nxv16f16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -1879,7 +1879,7 @@ define @intrinsic_vslidedown_vi_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { ; 
CHECK-LABEL: intrinsic_vslidedown_vi_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -1921,7 +1921,7 @@ define @intrinsic_vslidedown_vx_nxv1f32_nxv1f32( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1962,7 +1962,7 @@ define @intrinsic_vslidedown_vi_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -2004,7 +2004,7 @@ define @intrinsic_vslidedown_vx_nxv2f32_nxv2f32( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -2045,7 +2045,7 @@ define @intrinsic_vslidedown_vi_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -2087,7 +2087,7 @@ define @intrinsic_vslidedown_vx_nxv4f32_nxv4f32( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -2128,7 +2128,7 @@ define @intrinsic_vslidedown_vi_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -2170,7 +2170,7 @@ define @intrinsic_vslidedown_vx_nxv8f32_nxv8f32( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -2211,7 +2211,7 @@ define @intrinsic_vslidedown_vi_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -2253,7 +2253,7 @@ define @intrinsic_vslidedown_vx_nxv1f64_nxv1f64( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -2294,7 +2294,7 @@ define @intrinsic_vslidedown_vi_nxv1f64_nxv1f64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -2336,7 +2336,7 @@ define @intrinsic_vslidedown_vx_nxv2f64_nxv2f64( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -2377,7 +2377,7 @@ define @intrinsic_vslidedown_vi_nxv2f64_nxv2f64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -2419,7 +2419,7 @@ define @intrinsic_vslidedown_vx_nxv4f64_nxv4f64( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -2460,7 +2460,7 @@ define @intrinsic_vslidedown_vi_nxv4f64_nxv4f64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v12, 9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll @@ -12,7 +12,7 @@ define @intrinsic_vslideup_vx_nxv1i8_nxv1i8( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -53,7 +53,7 @@ define @intrinsic_vslideup_vi_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -95,7 +95,7 @@ define @intrinsic_vslideup_vx_nxv2i8_nxv2i8( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -136,7 +136,7 @@ define @intrinsic_vslideup_vi_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -178,7 +178,7 @@ define @intrinsic_vslideup_vx_nxv4i8_nxv4i8( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -219,7 +219,7 @@ define @intrinsic_vslideup_vi_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vslideup_vi_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -261,7 +261,7 @@ define @intrinsic_vslideup_vx_nxv8i8_nxv8i8( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -302,7 +302,7 @@ define @intrinsic_vslideup_vi_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -344,7 +344,7 @@ define @intrinsic_vslideup_vx_nxv16i8_nxv16i8( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -385,7 +385,7 @@ define @intrinsic_vslideup_vi_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vslideup.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -427,7 +427,7 @@ define @intrinsic_vslideup_vx_nxv32i8_nxv32i8( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -468,7 +468,7 @@ define @intrinsic_vslideup_vi_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vslideup.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -510,7 +510,7 @@ define @intrinsic_vslideup_vx_nxv1i16_nxv1i16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -551,7 +551,7 @@ define @intrinsic_vslideup_vi_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -593,7 +593,7 @@ define @intrinsic_vslideup_vx_nxv2i16_nxv2i16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -634,7 +634,7 @@ define @intrinsic_vslideup_vi_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: 
vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -676,7 +676,7 @@ define @intrinsic_vslideup_vx_nxv4i16_nxv4i16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vslideup_vi_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -759,7 +759,7 @@ define @intrinsic_vslideup_vx_nxv8i16_nxv8i16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -800,7 +800,7 @@ define @intrinsic_vslideup_vi_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vslideup.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -842,7 +842,7 @@ define @intrinsic_vslideup_vx_nxv16i16_nxv16i16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -883,7 +883,7 @@ define @intrinsic_vslideup_vi_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vslideup.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -925,7 +925,7 @@ define @intrinsic_vslideup_vx_nxv1i32_nxv1i32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -966,7 +966,7 @@ define @intrinsic_vslideup_vi_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1008,7 +1008,7 @@ define @intrinsic_vslideup_vx_nxv2i32_nxv2i32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1049,7 +1049,7 @@ define @intrinsic_vslideup_vi_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1091,7 +1091,7 @@ define @intrinsic_vslideup_vx_nxv4i32_nxv4i32( %0, %1, i32 %2, i32 %3) nounwind 
{ ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -1132,7 +1132,7 @@ define @intrinsic_vslideup_vi_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vslideup.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -1174,7 +1174,7 @@ define @intrinsic_vslideup_vx_nxv8i32_nxv8i32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -1215,7 +1215,7 @@ define @intrinsic_vslideup_vi_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vslideup.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -1257,7 +1257,7 @@ define @intrinsic_vslideup_vx_nxv1i64_nxv1i64( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1298,7 +1298,7 @@ define @intrinsic_vslideup_vi_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1340,7 +1340,7 @@ define @intrinsic_vslideup_vx_nxv2i64_nxv2i64( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -1381,7 +1381,7 @@ define @intrinsic_vslideup_vi_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vslideup.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -1423,7 +1423,7 @@ define @intrinsic_vslideup_vx_nxv4i64_nxv4i64( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -1464,7 +1464,7 @@ define @intrinsic_vslideup_vi_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vslideup.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -1506,7 +1506,7 @@ define @intrinsic_vslideup_vx_nxv1f16_nxv1f16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; 
CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1547,7 +1547,7 @@ define @intrinsic_vslideup_vi_nxv1f16_nxv1f16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1589,7 +1589,7 @@ define @intrinsic_vslideup_vx_nxv2f16_nxv2f16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1630,7 +1630,7 @@ define @intrinsic_vslideup_vi_nxv2f16_nxv2f16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1672,7 +1672,7 @@ define @intrinsic_vslideup_vx_nxv4f16_nxv4f16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1713,7 +1713,7 @@ define @intrinsic_vslideup_vi_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1755,7 +1755,7 @@ define @intrinsic_vslideup_vx_nxv8f16_nxv8f16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -1796,7 +1796,7 @@ define @intrinsic_vslideup_vi_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vslideup.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -1838,7 +1838,7 @@ define @intrinsic_vslideup_vx_nxv16f16_nxv16f16( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -1879,7 +1879,7 @@ define @intrinsic_vslideup_vi_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vslideup.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -1921,7 +1921,7 @@ define @intrinsic_vslideup_vx_nxv1f32_nxv1f32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1962,7 +1962,7 
@@ define @intrinsic_vslideup_vi_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -2004,7 +2004,7 @@ define @intrinsic_vslideup_vx_nxv2f32_nxv2f32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -2045,7 +2045,7 @@ define @intrinsic_vslideup_vi_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -2087,7 +2087,7 @@ define @intrinsic_vslideup_vx_nxv4f32_nxv4f32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -2128,7 +2128,7 @@ define @intrinsic_vslideup_vi_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vslideup.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -2170,7 +2170,7 @@ define @intrinsic_vslideup_vx_nxv8f32_nxv8f32( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -2211,7 +2211,7 @@ define @intrinsic_vslideup_vi_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vslideup.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -2253,7 +2253,7 @@ define @intrinsic_vslideup_vx_nxv1f64_nxv1f64( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -2294,7 +2294,7 @@ define @intrinsic_vslideup_vi_nxv1f64_nxv1f64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -2336,7 +2336,7 @@ define @intrinsic_vslideup_vx_nxv2f64_nxv2f64( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -2377,7 +2377,7 @@ define @intrinsic_vslideup_vi_nxv2f64_nxv2f64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f64_nxv2f64: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vslideup.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -2419,7 +2419,7 @@ define @intrinsic_vslideup_vx_nxv4f64_nxv4f64( %0, %1, i32 %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -2460,7 +2460,7 @@ define @intrinsic_vslideup_vi_nxv4f64_nxv4f64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vslideup.vi v8, v12, 9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll @@ -12,7 +12,7 @@ define @intrinsic_vslideup_vx_nxv1i8_nxv1i8( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -53,7 +53,7 @@ define @intrinsic_vslideup_vi_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -95,7 +95,7 @@ define @intrinsic_vslideup_vx_nxv2i8_nxv2i8( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -136,7 +136,7 @@ define @intrinsic_vslideup_vi_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -178,7 +178,7 @@ define @intrinsic_vslideup_vx_nxv4i8_nxv4i8( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -219,7 +219,7 @@ define @intrinsic_vslideup_vi_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -261,7 +261,7 @@ define @intrinsic_vslideup_vx_nxv8i8_nxv8i8( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -302,7 +302,7 @@ define @intrinsic_vslideup_vi_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i8_nxv8i8: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -344,7 +344,7 @@ define @intrinsic_vslideup_vx_nxv16i8_nxv16i8( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -385,7 +385,7 @@ define @intrinsic_vslideup_vi_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vslideup.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -427,7 +427,7 @@ define @intrinsic_vslideup_vx_nxv32i8_nxv32i8( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -468,7 +468,7 @@ define @intrinsic_vslideup_vi_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vslideup.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -510,7 +510,7 @@ define @intrinsic_vslideup_vx_nxv1i16_nxv1i16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -551,7 +551,7 @@ define @intrinsic_vslideup_vi_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -593,7 +593,7 @@ define @intrinsic_vslideup_vx_nxv2i16_nxv2i16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -634,7 +634,7 @@ define @intrinsic_vslideup_vi_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -676,7 +676,7 @@ define @intrinsic_vslideup_vx_nxv4i16_nxv4i16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vslideup_vi_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; 
CHECK-NEXT: ret entry: @@ -759,7 +759,7 @@ define @intrinsic_vslideup_vx_nxv8i16_nxv8i16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -800,7 +800,7 @@ define @intrinsic_vslideup_vi_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vslideup.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -842,7 +842,7 @@ define @intrinsic_vslideup_vx_nxv16i16_nxv16i16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -883,7 +883,7 @@ define @intrinsic_vslideup_vi_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vslideup.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -925,7 +925,7 @@ define @intrinsic_vslideup_vx_nxv1i32_nxv1i32( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -966,7 +966,7 @@ define @intrinsic_vslideup_vi_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1008,7 +1008,7 @@ define @intrinsic_vslideup_vx_nxv2i32_nxv2i32( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1049,7 +1049,7 @@ define @intrinsic_vslideup_vi_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1091,7 +1091,7 @@ define @intrinsic_vslideup_vx_nxv4i32_nxv4i32( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -1132,7 +1132,7 @@ define @intrinsic_vslideup_vi_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vslideup.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -1174,7 +1174,7 @@ define @intrinsic_vslideup_vx_nxv8i32_nxv8i32( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vslideup_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -1215,7 +1215,7 @@ define @intrinsic_vslideup_vi_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vslideup.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -1257,7 +1257,7 @@ define @intrinsic_vslideup_vx_nxv1i64_nxv1i64( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1298,7 +1298,7 @@ define @intrinsic_vslideup_vi_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1340,7 +1340,7 @@ define @intrinsic_vslideup_vx_nxv2i64_nxv2i64( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -1381,7 +1381,7 @@ define @intrinsic_vslideup_vi_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vslideup.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -1423,7 +1423,7 @@ define @intrinsic_vslideup_vx_nxv4i64_nxv4i64( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -1464,7 +1464,7 @@ define @intrinsic_vslideup_vi_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vslideup.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -1506,7 +1506,7 @@ define @intrinsic_vslideup_vx_nxv1f16_nxv1f16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1547,7 +1547,7 @@ define @intrinsic_vslideup_vi_nxv1f16_nxv1f16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1589,7 +1589,7 @@ define @intrinsic_vslideup_vx_nxv2f16_nxv2f16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli 
zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1630,7 +1630,7 @@ define @intrinsic_vslideup_vi_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1672,7 +1672,7 @@ define @intrinsic_vslideup_vx_nxv4f16_nxv4f16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1713,7 +1713,7 @@ define @intrinsic_vslideup_vi_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -1755,7 +1755,7 @@ define @intrinsic_vslideup_vx_nxv8f16_nxv8f16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -1796,7 +1796,7 @@ define @intrinsic_vslideup_vi_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vslideup.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -1838,7 +1838,7 @@ define @intrinsic_vslideup_vx_nxv16f16_nxv16f16( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -1879,7 +1879,7 @@ define @intrinsic_vslideup_vi_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vslideup.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -1921,7 +1921,7 @@ define @intrinsic_vslideup_vx_nxv1f32_nxv1f32( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -1962,7 +1962,7 @@ define @intrinsic_vslideup_vi_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -2004,7 +2004,7 @@ define @intrinsic_vslideup_vx_nxv2f32_nxv2f32( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -2045,7 +2045,7 @@ define 
@intrinsic_vslideup_vi_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -2087,7 +2087,7 @@ define @intrinsic_vslideup_vx_nxv4f32_nxv4f32( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -2128,7 +2128,7 @@ define @intrinsic_vslideup_vi_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vslideup.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -2170,7 +2170,7 @@ define @intrinsic_vslideup_vx_nxv8f32_nxv8f32( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -2211,7 +2211,7 @@ define @intrinsic_vslideup_vi_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vslideup.vi v8, v12, 9 ; CHECK-NEXT: ret entry: @@ -2253,7 +2253,7 @@ define @intrinsic_vslideup_vx_nxv1f64_nxv1f64( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 ; CHECK-NEXT: ret entry: @@ -2294,7 +2294,7 @@ define @intrinsic_vslideup_vi_nxv1f64_nxv1f64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vslideup.vi v8, v9, 9 ; CHECK-NEXT: ret entry: @@ -2336,7 +2336,7 @@ define @intrinsic_vslideup_vx_nxv2f64_nxv2f64( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret entry: @@ -2377,7 +2377,7 @@ define @intrinsic_vslideup_vi_nxv2f64_nxv2f64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vslideup.vi v8, v10, 9 ; CHECK-NEXT: ret entry: @@ -2419,7 +2419,7 @@ define @intrinsic_vslideup_vx_nxv4f64_nxv4f64( %0, %1, i64 %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vslideup.vx v8, v12, a0 ; CHECK-NEXT: ret entry: @@ -2460,7 +2460,7 @@ define @intrinsic_vslideup_vi_nxv4f64_nxv4f64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vslideup.vi v8, v12, 9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vsll_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vsll_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vsll_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vsll_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vsll_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vsll_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vsll_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vsll_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vsll_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: 
# %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vsll_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vsll_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vsll_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vsll_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vsll_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vsll_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vsll_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vsll_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vsll_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vsll_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, 
v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vsll_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vsll_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vsll_vx_nxv1i8_nxv1i8( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1095,7 +1095,7 @@ define @intrinsic_vsll_vx_nxv2i8_nxv2i8( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vsll_vx_nxv4i8_nxv4i8( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vsll_vx_nxv8i8_nxv8i8( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vsll_vx_nxv16i8_nxv16i8( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define @intrinsic_vsll_vx_nxv32i8_nxv32i8( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vsll_vx_nxv64i8_nxv64i8( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vsll_vx_nxv1i16_nxv1i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vsll_vx_nxv2i16_nxv2i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; 
CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vsll_vx_nxv4i16_nxv4i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define @intrinsic_vsll_vx_nxv8i16_nxv8i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vsll_vx_nxv16i16_nxv16i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vsll_vx_nxv32i16_nxv32i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vsll_vx_nxv1i32_nxv1i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vsll_vx_nxv2i32_nxv2i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ define @intrinsic_vsll_vx_nxv4i32_nxv4i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ define @intrinsic_vsll_vx_nxv8i32_nxv8i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vsll_vx_nxv16i32_nxv16i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1894,7 +1894,7 @@ define @intrinsic_vsll_vx_nxv1i64_nxv1i64( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1941,7 +1941,7 @@ define @intrinsic_vsll_vx_nxv2i64_nxv2i64( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv2i64_nxv2i64: 
; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1988,7 +1988,7 @@ define @intrinsic_vsll_vx_nxv4i64_nxv4i64( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2035,7 +2035,7 @@ define @intrinsic_vsll_vx_nxv8i64_nxv8i64( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2076,7 +2076,7 @@ define @intrinsic_vsll_vi_nxv1i8_nxv1i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2092,7 +2092,7 @@ define @intrinsic_vsll_1_nxv1i8_nxv1i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_1_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v8 ; CHECK-NEXT: ret entry: @@ -2142,7 +2142,7 @@ define @intrinsic_vsll_vi_nxv2i8_nxv2i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2175,7 +2175,7 @@ define @intrinsic_vsll_vi_nxv4i8_nxv4i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2208,7 +2208,7 @@ define @intrinsic_vsll_vi_nxv8i8_nxv8i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2241,7 +2241,7 @@ define @intrinsic_vsll_vi_nxv16i8_nxv16i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2274,7 +2274,7 @@ define @intrinsic_vsll_vi_nxv32i8_nxv32i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2307,7 +2307,7 @@ define @intrinsic_vsll_vi_nxv64i8_nxv64i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2340,7 +2340,7 @@ define @intrinsic_vsll_vi_nxv1i16_nxv1i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: 
intrinsic_vsll_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2373,7 +2373,7 @@ define @intrinsic_vsll_vi_nxv2i16_nxv2i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2406,7 +2406,7 @@ define @intrinsic_vsll_vi_nxv4i16_nxv4i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2439,7 +2439,7 @@ define @intrinsic_vsll_vi_nxv8i16_nxv8i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2472,7 +2472,7 @@ define @intrinsic_vsll_vi_nxv16i16_nxv16i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2505,7 +2505,7 @@ define @intrinsic_vsll_vi_nxv32i16_nxv32i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2538,7 +2538,7 @@ define @intrinsic_vsll_vi_nxv1i32_nxv1i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2571,7 +2571,7 @@ define @intrinsic_vsll_vi_nxv2i32_nxv2i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2604,7 +2604,7 @@ define @intrinsic_vsll_vi_nxv4i32_nxv4i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2637,7 +2637,7 @@ define @intrinsic_vsll_vi_nxv8i32_nxv8i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2670,7 +2670,7 @@ define @intrinsic_vsll_vi_nxv16i32_nxv16i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2703,7 +2703,7 @@ define 
@intrinsic_vsll_vi_nxv1i64_nxv1i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2736,7 +2736,7 @@ define @intrinsic_vsll_vi_nxv2i64_nxv2i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2769,7 +2769,7 @@ define @intrinsic_vsll_vi_nxv4i64_nxv4i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2802,7 +2802,7 @@ define @intrinsic_vsll_vi_nxv8i64_nxv8i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vsll_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vsll_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vsll_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vsll_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vsll_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vsll_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, 
i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vsll_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vsll_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vsll_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vsll_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vsll_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vsll_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vsll_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vsll_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vsll_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vsll_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, 
m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vsll_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vsll_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vsll_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vsll_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vsll_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsll.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vsll_vx_nxv1i8_nxv1i8( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1095,7 +1095,7 @@ define @intrinsic_vsll_vx_nxv2i8_nxv2i8( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vsll_vx_nxv4i8_nxv4i8( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vsll_vx_nxv8i8_nxv8i8( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vsll_vx_nxv16i8_nxv16i8( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define @intrinsic_vsll_vx_nxv32i8_nxv32i8( %0, i64 %1, i64 %2) nounwind { ; 
CHECK-LABEL: intrinsic_vsll_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vsll_vx_nxv64i8_nxv64i8( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vsll_vx_nxv1i16_nxv1i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vsll_vx_nxv2i16_nxv2i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vsll_vx_nxv4i16_nxv4i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define @intrinsic_vsll_vx_nxv8i16_nxv8i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vsll_vx_nxv16i16_nxv16i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vsll_vx_nxv32i16_nxv32i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vsll_vx_nxv1i32_nxv1i32( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vsll_vx_nxv2i32_nxv2i32( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ define @intrinsic_vsll_vx_nxv4i32_nxv4i32( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ 
define @intrinsic_vsll_vx_nxv8i32_nxv8i32( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vsll_vx_nxv16i32_nxv16i32( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1894,7 +1894,7 @@ define @intrinsic_vsll_vx_nxv1i64_nxv1i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1941,7 +1941,7 @@ define @intrinsic_vsll_vx_nxv2i64_nxv2i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1988,7 +1988,7 @@ define @intrinsic_vsll_vx_nxv4i64_nxv4i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2035,7 +2035,7 @@ define @intrinsic_vsll_vx_nxv8i64_nxv8i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsll_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsll.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2076,7 +2076,7 @@ define @intrinsic_vsll_vi_nxv1i8_nxv1i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2092,7 +2092,7 @@ define @intrinsic_vsll_1_nxv1i8_nxv1i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_1_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v8 ; CHECK-NEXT: ret entry: @@ -2142,7 +2142,7 @@ define @intrinsic_vsll_vi_nxv2i8_nxv2i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2175,7 +2175,7 @@ define @intrinsic_vsll_vi_nxv4i8_nxv4i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2208,7 +2208,7 @@ define @intrinsic_vsll_vi_nxv8i8_nxv8i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: 
ret entry: @@ -2241,7 +2241,7 @@ define @intrinsic_vsll_vi_nxv16i8_nxv16i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2274,7 +2274,7 @@ define @intrinsic_vsll_vi_nxv32i8_nxv32i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2307,7 +2307,7 @@ define @intrinsic_vsll_vi_nxv64i8_nxv64i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2340,7 +2340,7 @@ define @intrinsic_vsll_vi_nxv1i16_nxv1i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2373,7 +2373,7 @@ define @intrinsic_vsll_vi_nxv2i16_nxv2i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2406,7 +2406,7 @@ define @intrinsic_vsll_vi_nxv4i16_nxv4i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2439,7 +2439,7 @@ define @intrinsic_vsll_vi_nxv8i16_nxv8i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2472,7 +2472,7 @@ define @intrinsic_vsll_vi_nxv16i16_nxv16i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2505,7 +2505,7 @@ define @intrinsic_vsll_vi_nxv32i16_nxv32i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2538,7 +2538,7 @@ define @intrinsic_vsll_vi_nxv1i32_nxv1i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2571,7 +2571,7 @@ define @intrinsic_vsll_vi_nxv2i32_nxv2i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, 
m1, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2604,7 +2604,7 @@ define @intrinsic_vsll_vi_nxv4i32_nxv4i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2637,7 +2637,7 @@ define @intrinsic_vsll_vi_nxv8i32_nxv8i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2670,7 +2670,7 @@ define @intrinsic_vsll_vi_nxv16i32_nxv16i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2703,7 +2703,7 @@ define @intrinsic_vsll_vi_nxv1i64_nxv1i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2736,7 +2736,7 @@ define @intrinsic_vsll_vi_nxv2i64_nxv2i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2769,7 +2769,7 @@ define @intrinsic_vsll_vi_nxv4i64_nxv4i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2802,7 +2802,7 @@ define @intrinsic_vsll_vi_nxv8i64_nxv8i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsll_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsll.vi v8, v8, 9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsm.ll b/llvm/test/CodeGen/RISCV/rvv/vsm.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsm.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsm.ll @@ -9,7 +9,7 @@ define void @intrinsic_vsm_v_nxv1i1( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv1i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsm.v v0, (a0) ; CHECK-NEXT: ret entry: @@ -22,7 +22,7 @@ define void @intrinsic_vsm_v_nxv2i1( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv2i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsm.v v0, (a0) ; CHECK-NEXT: ret entry: @@ -35,7 +35,7 @@ define void @intrinsic_vsm_v_nxv4i1( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv4i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsm.v v0, (a0) ; CHECK-NEXT: ret entry: @@ -48,7 +48,7 @@ define void @intrinsic_vsm_v_nxv8i1( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: 
intrinsic_vsm_v_nxv8i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsm.v v0, (a0) ; CHECK-NEXT: ret entry: @@ -61,7 +61,7 @@ define void @intrinsic_vsm_v_nxv16i1( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv16i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsm.v v0, (a0) ; CHECK-NEXT: ret entry: @@ -74,7 +74,7 @@ define void @intrinsic_vsm_v_nxv32i1( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv32i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsm.v v0, (a0) ; CHECK-NEXT: ret entry: @@ -87,7 +87,7 @@ define void @intrinsic_vsm_v_nxv64i1( %0, * %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsm_v_nxv64i1: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vsm.v v0, (a0) ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define void @test_vsetvli_i16( %0, %1, * %2, iXLen %3) nounwind { ; CHECK-LABEL: test_vsetvli_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vmseq.vv v8, v8, v9 ; CHECK-NEXT: vsm.v v8, (a0) ; CHECK-NEXT: ret @@ -125,7 +125,7 @@ define void @test_vsetvli_i32( %0, %1, * %2, iXLen %3) nounwind { ; CHECK-LABEL: test_vsetvli_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vmseq.vv v8, v8, v9 ; CHECK-NEXT: vsm.v v8, (a0) ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: 
vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ 
define @intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1095,7 +1095,7 @@ define @intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: 
# %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define @intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define @intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: 
ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ define @intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ define @intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1898,7 +1898,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1957,7 +1957,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vsmul.vv v8, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -2016,7 +2016,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vsmul.vv v8, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -2075,7 +2075,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vsmul.vv v8, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) 
nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, 
e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsmul.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ 
-1095,7 +1095,7 @@ define @intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define @intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define @intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ define @intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ define @intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1894,7 +1894,7 @@ define @intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1941,7 +1941,7 @@ define @intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1988,7 +1988,7 @@ define @intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2035,7 +2035,7 @@ define @intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsmul.vx v8, v8, a0 ; CHECK-NEXT: 
ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll @@ -13,7 +13,7 @@ define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -36,7 +36,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -59,7 +59,7 @@ define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -82,7 +82,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -105,7 +105,7 @@ define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -128,7 +128,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -174,7 +174,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -197,7 +197,7 @@ define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -220,7 +220,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, * 
%1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -243,7 +243,7 @@ define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -266,7 +266,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -289,7 +289,7 @@ define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -312,7 +312,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -335,7 +335,7 @@ define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -358,7 +358,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -381,7 +381,7 @@ define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -404,7 +404,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -427,7 +427,7 @@ define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: 
vsoxei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -450,7 +450,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -473,7 +473,7 @@ define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -496,7 +496,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -519,7 +519,7 @@ define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -542,7 +542,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -565,7 +565,7 @@ define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -588,7 +588,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -611,7 +611,7 @@ define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -634,7 +634,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -657,7 +657,7 @@ define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -680,7 +680,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -703,7 +703,7 @@ define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -726,7 +726,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -749,7 +749,7 @@ define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -772,7 +772,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -795,7 +795,7 @@ define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -818,7 +818,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -841,7 +841,7 @@ define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -864,7 +864,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -887,7 +887,7 @@ define void 
@intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -910,7 +910,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -933,7 +933,7 @@ define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -956,7 +956,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -979,7 +979,7 @@ define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -1002,7 +1002,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1025,7 +1025,7 @@ define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -1071,7 +1071,7 @@ define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -1094,7 +1094,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; 
CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -1117,7 +1117,7 @@ define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1140,7 +1140,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1163,7 +1163,7 @@ define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -1186,7 +1186,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1209,7 +1209,7 @@ define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -1232,7 +1232,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -1255,7 +1255,7 @@ define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -1278,7 +1278,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsoxei.ll b/llvm/test/CodeGen/RISCV/rvv/vsoxei.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsoxei.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsoxei.ll @@ -12,7 +12,7 @@ define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; 
CHECK-NEXT: vsoxei32.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -35,7 +35,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -81,7 +81,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -127,7 +127,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -150,7 +150,7 @@ define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -173,7 +173,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -196,7 +196,7 @@ define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -219,7 +219,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +242,7 @@ define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -265,7 +265,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -288,7 +288,7 @@ define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -311,7 +311,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -334,7 +334,7 @@ define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -357,7 +357,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -380,7 +380,7 @@ define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -403,7 +403,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -426,7 +426,7 @@ define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -449,7 +449,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -472,7 +472,7 @@ define 
void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -495,7 +495,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -518,7 +518,7 @@ define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -541,7 +541,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -564,7 +564,7 @@ define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -587,7 +587,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -610,7 +610,7 @@ define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -633,7 +633,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -656,7 +656,7 @@ define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -679,7 +679,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, 
m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +702,7 @@ define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -725,7 +725,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -748,7 +748,7 @@ define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -771,7 +771,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -794,7 +794,7 @@ define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -817,7 +817,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -840,7 +840,7 @@ define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -863,7 +863,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -886,7 +886,7 @@ define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -909,7 +909,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, * %1, %2, %3, iXLen 
%4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -932,7 +932,7 @@ define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -955,7 +955,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -978,7 +978,7 @@ define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -1001,7 +1001,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1024,7 +1024,7 @@ define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -1047,7 +1047,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -1070,7 +1070,7 @@ define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -1093,7 +1093,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -1116,7 +1116,7 @@ define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, 
ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1139,7 +1139,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1162,7 +1162,7 @@ define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1185,7 +1185,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1208,7 +1208,7 @@ define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -1231,7 +1231,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1254,7 +1254,7 @@ define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -1277,7 +1277,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -1300,7 +1300,7 @@ define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -1323,7 +1323,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -1346,7 +1346,7 @@ define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i32( %0, * %1, %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1369,7 +1369,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1392,7 +1392,7 @@ define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -1415,7 +1415,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1438,7 +1438,7 @@ define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -1461,7 +1461,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -1484,7 +1484,7 @@ define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -1507,7 +1507,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsoxei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -1530,7 +1530,7 @@ define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1553,7 +1553,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxei16.v v8, 
(a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1576,7 +1576,7 @@ define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1599,7 +1599,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1622,7 +1622,7 @@ define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1645,7 +1645,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1668,7 +1668,7 @@ define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -1691,7 +1691,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1714,7 +1714,7 @@ define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -1737,7 +1737,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1760,7 @@ define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -1783,7 +1783,7 @@ define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -1806,7 +1806,7 @@ define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1829,7 +1829,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1852,7 +1852,7 @@ define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1875,7 +1875,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1898,7 +1898,7 @@ define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1921,7 +1921,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1944,7 +1944,7 @@ define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -1967,7 +1967,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1990,7 +1990,7 @@ define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -2013,7 +2013,7 @@ define void 
@intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -2036,7 +2036,7 @@ define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -2059,7 +2059,7 @@ define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -2082,7 +2082,7 @@ define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2105,7 +2105,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2128,7 +2128,7 @@ define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2151,7 +2151,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2174,7 +2174,7 @@ define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -2197,7 +2197,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2220,7 +2220,7 @@ define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -2243,7 +2243,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -2266,7 +2266,7 @@ define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -2289,7 +2289,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -2312,7 +2312,7 @@ define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2335,7 +2335,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2358,7 +2358,7 @@ define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -2381,7 +2381,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2404,7 +2404,7 @@ define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -2427,7 +2427,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -2450,7 +2450,7 @@ 
define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -2473,7 +2473,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -2496,7 +2496,7 @@ define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2519,7 +2519,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2542,7 +2542,7 @@ define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2565,7 +2565,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2588,7 +2588,7 @@ define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2611,7 +2611,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2634,7 +2634,7 @@ define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -2657,7 +2657,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2680,7 +2680,7 @@ define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -2703,7 +2703,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -2726,7 +2726,7 @@ define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -2749,7 +2749,7 @@ define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -2772,7 +2772,7 @@ define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2795,7 +2795,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2818,7 +2818,7 @@ define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2841,7 +2841,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2864,7 +2864,7 @@ define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -2887,7 +2887,7 @@ define void 
@intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2910,7 +2910,7 @@ define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -2933,7 +2933,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -2956,7 +2956,7 @@ define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -2979,7 +2979,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -3002,7 +3002,7 @@ define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3025,7 +3025,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3048,7 +3048,7 @@ define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3071,7 +3071,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3094,7 +3094,7 @@ define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3117,7 +3117,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3140,7 +3140,7 @@ define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -3163,7 +3163,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsoxei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -3186,7 +3186,7 @@ define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3209,7 +3209,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3232,7 +3232,7 @@ define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3255,7 +3255,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3278,7 +3278,7 @@ define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3301,7 +3301,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3324,7 +3324,7 @@ define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i8( %0, * %1, %2, iXLen 
%3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3347,7 +3347,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3370,7 +3370,7 @@ define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3393,7 +3393,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3416,7 +3416,7 @@ define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3439,7 +3439,7 @@ define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3462,7 +3462,7 @@ define void @intrinsic_vsoxei_v_nxv64i8_nxv64i8_nxv64i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -3485,7 +3485,7 @@ define void @intrinsic_vsoxei_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -3508,7 +3508,7 @@ define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3531,7 +3531,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v9, 
v0.t ; CHECK-NEXT: ret entry: @@ -3554,7 +3554,7 @@ define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3577,7 +3577,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3600,7 +3600,7 @@ define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3623,7 +3623,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3646,7 +3646,7 @@ define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3669,7 +3669,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3692,7 +3692,7 @@ define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3715,7 +3715,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3738,7 +3738,7 @@ define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -3761,7 +3761,7 @@ define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i8: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -3784,7 +3784,7 @@ define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3807,7 +3807,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3830,7 +3830,7 @@ define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3853,7 +3853,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3876,7 +3876,7 @@ define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3899,7 +3899,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3922,7 +3922,7 @@ define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3945,7 +3945,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3968,7 +3968,7 @@ define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -3991,7 +3991,7 @@ define void 
@intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -4014,7 +4014,7 @@ define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4037,7 +4037,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4060,7 +4060,7 @@ define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -4083,7 +4083,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4106,7 +4106,7 @@ define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -4129,7 +4129,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -4152,7 +4152,7 @@ define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -4175,7 +4175,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -4198,7 +4198,7 @@ define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; 
CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4221,7 +4221,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4244,7 +4244,7 @@ define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4267,7 +4267,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4290,7 +4290,7 @@ define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4313,7 +4313,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4336,7 +4336,7 @@ define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -4359,7 +4359,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4382,7 +4382,7 @@ define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -4405,7 +4405,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -4428,7 +4428,7 @@ define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i8( %0, * %1, %2, iXLen 
%3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -4451,7 +4451,7 @@ define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -4474,7 +4474,7 @@ define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4497,7 +4497,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4520,7 +4520,7 @@ define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4543,7 +4543,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4566,7 +4566,7 @@ define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -4589,7 +4589,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4612,7 +4612,7 @@ define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -4635,7 +4635,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: 
vsoxei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -4658,7 +4658,7 @@ define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -4681,7 +4681,7 @@ define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -4704,7 +4704,7 @@ define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4727,7 +4727,7 @@ define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4750,7 +4750,7 @@ define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -4773,7 +4773,7 @@ define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4796,7 +4796,7 @@ define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -4819,7 +4819,7 @@ define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -4842,7 +4842,7 @@ define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -4865,7 +4865,7 @@ define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsoxei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv32.ll @@ -11,7 +11,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -25,7 +25,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -42,7 +42,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -56,7 +56,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -72,7 +72,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -85,7 +85,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -102,7 +102,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -116,7 +116,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -133,7 +133,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -147,7 +147,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; 
CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -164,7 +164,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -178,7 +178,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -195,7 +195,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -209,7 +209,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -226,7 +226,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -240,7 +240,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -257,7 +257,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -271,7 +271,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -289,7 +289,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -304,7 +304,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -322,7 +322,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -337,7 +337,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -355,7 +355,7 @@ ; CHECK-NEXT: vmv1r.v 
v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -370,7 +370,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -389,7 +389,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -405,7 +405,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -424,7 +424,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -440,7 +440,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -459,7 +459,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -475,7 +475,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -495,7 +495,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -512,7 +512,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -532,7 +532,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -549,7 +549,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -569,7 +569,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; 
CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -586,7 +586,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -607,7 +607,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -625,7 +625,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -646,7 +646,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -664,7 +664,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -685,7 +685,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -703,7 +703,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -725,7 +725,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -744,7 +744,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -766,7 +766,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -785,7 +785,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -807,7 +807,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -826,7 +826,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, 
v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -842,7 +842,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -855,7 +855,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -872,7 +872,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -886,7 +886,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -902,7 +902,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -915,7 +915,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -932,7 +932,7 @@ ; CHECK-NEXT: vmv2r.v v16, v8 ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -946,7 +946,7 @@ ; CHECK-NEXT: vmv2r.v v16, v8 ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -963,7 +963,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -977,7 +977,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -994,7 +994,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -1008,7 +1008,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def 
$v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -1026,7 +1026,7 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -1041,7 +1041,7 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -1059,7 +1059,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -1074,7 +1074,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1092,7 +1092,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -1107,7 +1107,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -1124,7 +1124,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -1138,7 +1138,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1155,7 +1155,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -1169,7 +1169,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1186,7 +1186,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -1200,7 +1200,7 @@ ; CHECK-NEXT: # kill: 
def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1217,7 +1217,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1231,7 +1231,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1248,7 +1248,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1262,7 +1262,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1279,7 +1279,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1293,7 +1293,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1311,7 +1311,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1326,7 +1326,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1344,7 +1344,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1359,7 +1359,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1392,7 +1392,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; 
CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1411,7 +1411,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1427,7 +1427,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1446,7 +1446,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1462,7 +1462,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1481,7 +1481,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1497,7 +1497,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1517,7 +1517,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1534,7 +1534,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1554,7 +1554,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1571,7 +1571,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1591,7 +1591,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1608,7 +1608,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1629,7 +1629,7 @@ ; 
CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1647,7 +1647,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1668,7 +1668,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1686,7 +1686,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1707,7 +1707,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1725,7 +1725,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1747,7 +1747,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1766,7 +1766,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1788,7 +1788,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1807,7 +1807,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1829,7 +1829,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1848,7 +1848,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1865,7 +1865,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli 
zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -1879,7 +1879,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1896,7 +1896,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -1910,7 +1910,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1926,7 +1926,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -1939,7 +1939,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1956,7 +1956,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1970,7 +1970,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1987,7 +1987,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2001,7 +2001,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2018,7 +2018,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -2032,7 +2032,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2050,7 +2050,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; 
CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2065,7 +2065,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2083,7 +2083,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2098,7 +2098,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2116,7 +2116,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -2131,7 +2131,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2150,7 +2150,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2166,7 +2166,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2185,7 +2185,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2201,7 +2201,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2220,7 +2220,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -2236,7 +2236,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2256,7 +2256,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2273,7 +2273,7 @@ ; 
CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2293,7 +2293,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2310,7 +2310,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2330,7 +2330,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -2347,7 +2347,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2368,7 +2368,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2386,7 +2386,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2407,7 +2407,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2425,7 +2425,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2446,7 +2446,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -2464,7 +2464,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2486,7 +2486,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2505,7 +2505,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; 
CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2527,7 +2527,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2546,7 +2546,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2568,7 +2568,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -2587,7 +2587,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2604,7 +2604,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -2618,7 +2618,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2635,7 +2635,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -2649,7 +2649,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2666,7 +2666,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -2680,7 +2680,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2697,7 +2697,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2711,7 +2711,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, 
v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2728,7 +2728,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2742,7 +2742,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2759,7 +2759,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2773,7 +2773,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2791,7 +2791,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2806,7 +2806,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2824,7 +2824,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2839,7 +2839,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2857,7 +2857,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2872,7 +2872,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2891,7 +2891,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2907,7 +2907,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: 
vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2926,7 +2926,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2942,7 +2942,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2961,7 +2961,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2977,7 +2977,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2997,7 +2997,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3014,7 +3014,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3034,7 +3034,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3051,7 +3051,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3071,7 +3071,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3088,7 +3088,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3109,7 +3109,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3127,7 +3127,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3148,7 +3148,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: 
vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3166,7 +3166,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3187,7 +3187,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3205,7 +3205,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3227,7 +3227,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3246,7 +3246,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3268,7 +3268,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3287,7 +3287,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3309,7 +3309,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3328,7 +3328,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3345,7 +3345,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3359,7 +3359,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3376,7 +3376,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def 
$v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3390,7 +3390,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3406,7 +3406,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3419,7 +3419,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3436,7 +3436,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3450,7 +3450,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3467,7 +3467,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3481,7 +3481,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3498,7 +3498,7 @@ ; CHECK-NEXT: vmv2r.v v16, v8 ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3512,7 +3512,7 @@ ; CHECK-NEXT: vmv2r.v v16, v8 ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3530,7 +3530,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3545,7 +3545,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3563,7 +3563,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; 
CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3578,7 +3578,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3596,7 +3596,7 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3611,7 +3611,7 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3627,7 +3627,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3640,7 +3640,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3657,7 +3657,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3671,7 +3671,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3687,7 +3687,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3700,7 +3700,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3717,7 +3717,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3731,7 +3731,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3748,7 +3748,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; 
CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3762,7 +3762,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3779,7 +3779,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3793,7 +3793,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3811,7 +3811,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3826,7 +3826,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3844,7 +3844,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3859,7 +3859,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3877,7 +3877,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3892,7 +3892,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3911,7 +3911,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3927,7 +3927,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3946,7 +3946,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 
; CHECK-NEXT: ret entry: @@ -3962,7 +3962,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3981,7 +3981,7 @@ ; CHECK-NEXT: vmv1r.v v18, v16 ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3997,7 +3997,7 @@ ; CHECK-NEXT: vmv1r.v v18, v16 ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -4017,7 +4017,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -4034,7 +4034,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4054,7 +4054,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4071,7 +4071,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4091,7 +4091,7 @@ ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -4108,7 +4108,7 @@ ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -4129,7 +4129,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -4147,7 +4147,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4168,7 +4168,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4186,7 +4186,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli 
zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4207,7 +4207,7 @@ ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -4225,7 +4225,7 @@ ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -4247,7 +4247,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -4266,7 +4266,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4288,7 +4288,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4307,7 +4307,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4329,7 +4329,7 @@ ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -4348,7 +4348,7 @@ ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -4365,7 +4365,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -4379,7 +4379,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -4396,7 +4396,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -4410,7 +4410,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v 
v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -4427,7 +4427,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -4441,7 +4441,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -4458,7 +4458,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -4472,7 +4472,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4489,7 +4489,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -4503,7 +4503,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4519,7 +4519,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -4532,7 +4532,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4549,7 +4549,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4563,7 +4563,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4580,7 +4580,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ 
-4594,7 +4594,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4611,7 +4611,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -4625,7 +4625,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4643,7 +4643,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4658,7 +4658,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4676,7 +4676,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4691,7 +4691,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4709,7 +4709,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -4724,7 +4724,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4743,7 +4743,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4759,7 +4759,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4778,7 +4778,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4794,7 +4794,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, 
mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4813,7 +4813,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -4829,7 +4829,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4849,7 +4849,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4866,7 +4866,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4886,7 +4886,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4903,7 +4903,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4923,7 +4923,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -4940,7 +4940,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4961,7 +4961,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4979,7 +4979,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5000,7 +5000,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5018,7 +5018,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5039,7 
+5039,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -5057,7 +5057,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5079,7 +5079,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5098,7 +5098,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5120,7 +5120,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5139,7 +5139,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5161,7 +5161,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -5180,7 +5180,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5197,7 +5197,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -5211,7 +5211,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5228,7 +5228,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -5242,7 +5242,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5259,7 +5259,7 @@ 
; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -5273,7 +5273,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5290,7 +5290,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5304,7 +5304,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5321,7 +5321,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5335,7 +5335,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5352,7 +5352,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5366,7 +5366,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5384,7 +5384,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5399,7 +5399,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5417,7 +5417,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5432,7 +5432,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5450,7 +5450,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; 
CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5465,7 +5465,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5484,7 +5484,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5500,7 +5500,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5519,7 +5519,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5535,7 +5535,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5554,7 +5554,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5570,7 +5570,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5590,7 +5590,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5607,7 +5607,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5627,7 +5627,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5644,7 +5644,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5664,7 +5664,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, 
ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5681,7 +5681,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5702,7 +5702,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5720,7 +5720,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5741,7 +5741,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5759,7 +5759,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5780,7 +5780,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5798,7 +5798,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5820,7 +5820,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5839,7 +5839,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5861,7 +5861,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5880,7 +5880,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5902,7 +5902,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5921,7 +5921,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; 
CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5937,7 +5937,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -5950,7 +5950,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -5967,7 +5967,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -5981,7 +5981,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -5998,7 +5998,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -6012,7 +6012,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -6029,7 +6029,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -6043,7 +6043,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -6060,7 +6060,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -6074,7 +6074,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -6091,7 +6091,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, 
mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6105,7 +6105,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6122,7 +6122,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6136,7 +6136,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6153,7 +6153,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6167,7 +6167,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6185,7 +6185,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6200,7 +6200,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6218,7 +6218,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6233,7 +6233,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6251,7 +6251,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6266,7 +6266,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6285,7 +6285,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6301,7 +6301,7 @@ ; 
CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6320,7 +6320,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6336,7 +6336,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6355,7 +6355,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6371,7 +6371,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6391,7 +6391,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6408,7 +6408,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6428,7 +6428,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6445,7 +6445,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6465,7 +6465,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6482,7 +6482,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6503,7 +6503,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6521,7 +6521,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; 
CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6542,7 +6542,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6560,7 +6560,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6581,7 +6581,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6599,7 +6599,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6621,7 +6621,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6640,7 +6640,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6662,7 +6662,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6681,7 +6681,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6703,7 +6703,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6722,7 +6722,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6739,7 +6739,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -6753,7 +6753,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, 
(a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -6770,7 +6770,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -6784,7 +6784,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -6801,7 +6801,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -6815,7 +6815,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -6832,7 +6832,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6846,7 +6846,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6863,7 +6863,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6877,7 +6877,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6894,7 +6894,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6908,7 +6908,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6926,7 +6926,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6941,7 +6941,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, 
(a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6959,7 +6959,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6974,7 +6974,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6992,7 +6992,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7007,7 +7007,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7026,7 +7026,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7042,7 +7042,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7061,7 +7061,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7077,7 +7077,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7096,7 +7096,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7112,7 +7112,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7132,7 +7132,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7149,7 +7149,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7169,7 +7169,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; 
CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7186,7 +7186,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7206,7 +7206,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7223,7 +7223,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7244,7 +7244,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7262,7 +7262,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7283,7 +7283,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7301,7 +7301,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7322,7 +7322,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7340,7 +7340,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7362,7 +7362,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7381,7 +7381,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7403,7 +7403,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, 
ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7422,7 +7422,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7444,7 +7444,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7463,7 +7463,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7480,7 +7480,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -7494,7 +7494,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -7511,7 +7511,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -7525,7 +7525,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -7542,7 +7542,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -7556,7 +7556,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -7573,7 +7573,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -7587,7 +7587,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -7604,7 +7604,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: 
vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -7618,7 +7618,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -7635,7 +7635,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -7649,7 +7649,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -7667,7 +7667,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -7682,7 +7682,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -7700,7 +7700,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -7715,7 +7715,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -7733,7 +7733,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -7748,7 +7748,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -7765,7 +7765,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -7779,7 +7779,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -7796,7 +7796,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; 
CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -7810,7 +7810,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -7826,7 +7826,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -7839,7 +7839,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -7856,7 +7856,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -7870,7 +7870,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -7887,7 +7887,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -7901,7 +7901,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -7918,7 +7918,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -7932,7 +7932,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -7949,7 +7949,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -7963,7 +7963,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 
def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -7980,7 +7980,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -7994,7 +7994,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -8011,7 +8011,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -8025,7 +8025,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -8042,7 +8042,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8056,7 +8056,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8073,7 +8073,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8087,7 +8087,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8104,7 +8104,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8118,7 +8118,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8136,7 +8136,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8151,7 +8151,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: 
vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8169,7 +8169,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8184,7 +8184,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8202,7 +8202,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8217,7 +8217,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8236,7 +8236,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8252,7 +8252,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8271,7 +8271,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8287,7 +8287,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8306,7 +8306,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8322,7 +8322,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8342,7 +8342,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8359,7 +8359,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; 
CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8379,7 +8379,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8396,7 +8396,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8416,7 +8416,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8433,7 +8433,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8454,7 +8454,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8472,7 +8472,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8493,7 +8493,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8511,7 +8511,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8532,7 +8532,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8550,7 +8550,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8572,7 +8572,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8591,7 +8591,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8613,7 +8613,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, 
v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8632,7 +8632,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8654,7 +8654,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8673,7 +8673,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8690,7 +8690,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -8704,7 +8704,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -8721,7 +8721,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -8735,7 +8735,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -8752,7 +8752,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -8766,7 +8766,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -8783,7 +8783,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8797,7 +8797,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8814,7 +8814,7 @@ ; CHECK-NEXT: 
vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8828,7 +8828,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8845,7 +8845,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8859,7 +8859,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8877,7 +8877,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8892,7 +8892,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8910,7 +8910,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8925,7 +8925,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8943,7 +8943,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8958,7 +8958,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8977,7 +8977,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8993,7 +8993,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9012,7 +9012,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli 
zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9028,7 +9028,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9047,7 +9047,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9063,7 +9063,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9083,7 +9083,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9100,7 +9100,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9120,7 +9120,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9137,7 +9137,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9157,7 +9157,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9174,7 +9174,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9195,7 +9195,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9213,7 +9213,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9234,7 +9234,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9252,7 +9252,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; 
CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9273,7 +9273,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9291,7 +9291,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9313,7 +9313,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9332,7 +9332,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9354,7 +9354,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9373,7 +9373,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9395,7 +9395,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9414,7 +9414,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9431,7 +9431,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -9445,7 +9445,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -9462,7 +9462,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -9476,7 +9476,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: 
vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -9493,7 +9493,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -9507,7 +9507,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -9524,7 +9524,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9538,7 +9538,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9555,7 +9555,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9569,7 +9569,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9586,7 +9586,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9600,7 +9600,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9618,7 +9618,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9633,7 +9633,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9651,7 +9651,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9666,7 +9666,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli 
zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9684,7 +9684,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9699,7 +9699,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9718,7 +9718,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9734,7 +9734,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9753,7 +9753,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9769,7 +9769,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9788,7 +9788,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9804,7 +9804,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9824,7 +9824,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9841,7 +9841,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9861,7 +9861,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9878,7 +9878,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t 
; CHECK-NEXT: ret entry: @@ -9898,7 +9898,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9915,7 +9915,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9936,7 +9936,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9954,7 +9954,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9975,7 +9975,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9993,7 +9993,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10014,7 +10014,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10032,7 +10032,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10054,7 +10054,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10073,7 +10073,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10095,7 +10095,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10114,7 +10114,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10136,7 +10136,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; 
CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10155,7 +10155,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10172,7 +10172,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -10186,7 +10186,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -10203,7 +10203,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -10217,7 +10217,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -10234,7 +10234,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -10248,7 +10248,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -10265,7 +10265,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10279,7 +10279,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10296,7 +10296,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10310,7 +10310,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ 
-10327,7 +10327,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10341,7 +10341,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10359,7 +10359,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10374,7 +10374,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10392,7 +10392,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10407,7 +10407,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10425,7 +10425,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10440,7 +10440,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10459,7 +10459,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10475,7 +10475,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10494,7 +10494,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10510,7 +10510,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10529,7 +10529,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v 
v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10545,7 +10545,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10565,7 +10565,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10582,7 +10582,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10602,7 +10602,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10619,7 +10619,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10639,7 +10639,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10656,7 +10656,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10677,7 +10677,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10695,7 +10695,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10716,7 +10716,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10734,7 +10734,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10755,7 +10755,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, 
ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10773,7 +10773,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10795,7 +10795,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10814,7 +10814,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10836,7 +10836,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10855,7 +10855,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10877,7 +10877,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10896,7 +10896,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10913,7 +10913,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -10927,7 +10927,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -10944,7 +10944,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -10958,7 +10958,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -10974,7 +10974,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: 
vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -10987,7 +10987,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -11004,7 +11004,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11018,7 +11018,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11035,7 +11035,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11049,7 +11049,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11066,7 +11066,7 @@ ; CHECK-NEXT: vmv2r.v v16, v8 ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -11080,7 +11080,7 @@ ; CHECK-NEXT: vmv2r.v v16, v8 ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -11098,7 +11098,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11113,7 +11113,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11131,7 +11131,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11146,7 +11146,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11164,7 +11164,7 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, 
e16, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -11179,7 +11179,7 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -11196,7 +11196,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -11210,7 +11210,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -11227,7 +11227,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -11241,7 +11241,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -11258,7 +11258,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -11272,7 +11272,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -11289,7 +11289,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -11303,7 +11303,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -11320,7 +11320,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -11334,7 +11334,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: 
vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -11351,7 +11351,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -11365,7 +11365,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -11382,7 +11382,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11396,7 +11396,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11413,7 +11413,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11427,7 +11427,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11444,7 +11444,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11458,7 +11458,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11476,7 +11476,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11491,7 +11491,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11509,7 +11509,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11524,7 +11524,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, 
ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11542,7 +11542,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11557,7 +11557,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11574,7 +11574,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11588,7 +11588,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11605,7 +11605,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11619,7 +11619,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11635,7 +11635,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11648,7 +11648,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11665,7 +11665,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -11679,7 +11679,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -11696,7 +11696,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -11710,7 +11710,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; 
CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -11727,7 +11727,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11741,7 +11741,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11759,7 +11759,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -11774,7 +11774,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -11792,7 +11792,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -11807,7 +11807,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -11825,7 +11825,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11840,7 +11840,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11859,7 +11859,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -11875,7 +11875,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -11894,7 +11894,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -11910,7 +11910,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v 
v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -11929,7 +11929,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11945,7 +11945,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11965,7 +11965,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -11982,7 +11982,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12002,7 +12002,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12019,7 +12019,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12039,7 +12039,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -12056,7 +12056,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -12077,7 +12077,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12095,7 +12095,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12116,7 +12116,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12134,7 +12134,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12155,7 +12155,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v 
v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -12173,7 +12173,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -12195,7 +12195,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12214,7 +12214,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12236,7 +12236,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12255,7 +12255,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12277,7 +12277,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -12296,7 +12296,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -12313,7 +12313,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -12327,7 +12327,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -12344,7 +12344,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -12358,7 +12358,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -12375,7 +12375,7 @@ ; CHECK-NEXT: # kill: def $v8 killed 
$v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -12389,7 +12389,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -12406,7 +12406,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12420,7 +12420,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12437,7 +12437,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12451,7 +12451,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12468,7 +12468,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12482,7 +12482,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12500,7 +12500,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12515,7 +12515,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12533,7 +12533,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12548,7 +12548,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12566,7 +12566,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: 
vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12581,7 +12581,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12600,7 +12600,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12616,7 +12616,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12635,7 +12635,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12651,7 +12651,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12670,7 +12670,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12686,7 +12686,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12706,7 +12706,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12723,7 +12723,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12743,7 +12743,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12760,7 +12760,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12780,7 +12780,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, 
e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12797,7 +12797,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12818,7 +12818,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12836,7 +12836,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12857,7 +12857,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12875,7 +12875,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12896,7 +12896,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12914,7 +12914,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12936,7 +12936,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12955,7 +12955,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12977,7 +12977,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12996,7 +12996,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13018,7 +13018,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -13037,7 
+13037,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13054,7 +13054,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -13068,7 +13068,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -13085,7 +13085,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -13099,7 +13099,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -13116,7 +13116,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -13130,7 +13130,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -13147,7 +13147,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -13161,7 +13161,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -13178,7 +13178,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -13192,7 +13192,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -13209,7 +13209,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: 
vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -13223,7 +13223,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -13241,7 +13241,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -13256,7 +13256,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -13274,7 +13274,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -13289,7 +13289,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -13307,7 +13307,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -13322,7 +13322,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv64.ll @@ -11,7 +11,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -25,7 +25,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -42,7 +42,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -56,7 +56,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; 
CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -72,7 +72,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -85,7 +85,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -102,7 +102,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -116,7 +116,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -133,7 +133,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -147,7 +147,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -163,7 +163,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -176,7 +176,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -193,7 +193,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -207,7 +207,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -224,7 +224,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: 
vsoxseg3ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -238,7 +238,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -255,7 +255,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -286,7 +286,7 @@ ; CHECK-NEXT: vmv2r.v v16, v8 ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -300,7 +300,7 @@ ; CHECK-NEXT: vmv2r.v v16, v8 ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -317,7 +317,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -331,7 +331,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +349,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -364,7 +364,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -382,7 +382,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -397,7 +397,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -415,7 +415,7 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -430,7 +430,7 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -448,7 +448,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -463,7 +463,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -479,7 +479,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -492,7 +492,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -509,7 +509,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -523,7 +523,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -539,7 +539,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -552,7 +552,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -569,7 +569,7 @@ ; CHECK-NEXT: vmv2r.v v16, v8 ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -583,7 +583,7 @@ ; CHECK-NEXT: vmv2r.v v16, v8 ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -600,7 +600,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -614,7 +614,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v 
v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -631,7 +631,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -645,7 +645,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -663,7 +663,7 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -678,7 +678,7 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -696,7 +696,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -711,7 +711,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -729,7 +729,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -744,7 +744,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -761,7 +761,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -775,7 +775,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -792,7 +792,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -806,7 +806,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: 
vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -823,7 +823,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -837,7 +837,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -854,7 +854,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -868,7 +868,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -885,7 +885,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -899,7 +899,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -916,7 +916,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -930,7 +930,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -947,7 +947,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -961,7 +961,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -978,7 +978,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -992,7 +992,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, 
m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1010,7 +1010,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1025,7 +1025,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1043,7 +1043,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1058,7 +1058,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1076,7 +1076,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1091,7 +1091,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1109,7 +1109,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1124,7 +1124,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1143,7 +1143,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1159,7 +1159,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1178,7 +1178,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1194,7 +1194,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1213,7 
+1213,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1229,7 +1229,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1248,7 +1248,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1264,7 +1264,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1284,7 +1284,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1301,7 +1301,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1321,7 +1321,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1338,7 +1338,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1358,7 +1358,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1375,7 +1375,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1395,7 +1395,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1412,7 +1412,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1433,7 +1433,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, 
mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1451,7 +1451,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1472,7 +1472,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1490,7 +1490,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1511,7 +1511,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1529,7 +1529,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1550,7 +1550,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1568,7 +1568,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1590,7 +1590,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1609,7 +1609,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1631,7 +1631,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1650,7 +1650,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1672,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1691,7 +1691,7 @@ ; 
CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1713,7 +1713,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1732,7 +1732,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1749,7 +1749,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -1763,7 +1763,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1780,7 +1780,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -1794,7 +1794,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1811,7 +1811,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -1825,7 +1825,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1842,7 +1842,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -1856,7 +1856,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1873,7 +1873,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: 
vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1887,7 +1887,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1904,7 +1904,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1918,7 +1918,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1935,7 +1935,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1949,7 +1949,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1966,7 +1966,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1980,7 +1980,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1998,7 +1998,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2013,7 +2013,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2031,7 +2031,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2046,7 +2046,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2064,7 +2064,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2079,7 +2079,7 @@ ; 
CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2097,7 +2097,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2112,7 +2112,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2131,7 +2131,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2147,7 +2147,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2166,7 +2166,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2182,7 +2182,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2201,7 +2201,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2217,7 +2217,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2236,7 +2236,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2252,7 +2252,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2272,7 +2272,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2289,7 +2289,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, 
e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2309,7 +2309,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2326,7 +2326,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2346,7 +2346,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2363,7 +2363,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2383,7 +2383,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2400,7 +2400,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2421,7 +2421,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2439,7 +2439,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2460,7 +2460,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2478,7 +2478,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2499,7 +2499,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2517,7 +2517,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; 
CHECK-NEXT: ret entry: @@ -2538,7 +2538,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2556,7 +2556,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2578,7 +2578,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2597,7 +2597,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2619,7 +2619,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2638,7 +2638,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2660,7 +2660,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2679,7 +2679,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2701,7 +2701,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2720,7 +2720,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2737,7 +2737,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -2751,7 +2751,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -2768,7 
+2768,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -2782,7 +2782,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -2798,7 +2798,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -2811,7 +2811,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -2827,7 +2827,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -2840,7 +2840,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -2857,7 +2857,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -2871,7 +2871,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2888,7 +2888,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -2902,7 +2902,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2919,7 +2919,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -2933,7 +2933,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: 
vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -2950,7 +2950,7 @@ ; CHECK-NEXT: vmv2r.v v16, v8 ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -2964,7 +2964,7 @@ ; CHECK-NEXT: vmv2r.v v16, v8 ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -2982,7 +2982,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -2997,7 +2997,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3015,7 +3015,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3030,7 +3030,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3048,7 +3048,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -3063,7 +3063,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -3081,7 +3081,7 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3096,7 +3096,7 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3112,7 +3112,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3125,7 +3125,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3142,7 
+3142,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3156,7 +3156,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3172,7 +3172,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3185,7 +3185,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3202,7 +3202,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3216,7 +3216,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3233,7 +3233,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3247,7 +3247,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3264,7 +3264,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3278,7 +3278,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3295,7 +3295,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3309,7 +3309,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v8, 
(a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3326,7 +3326,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3340,7 +3340,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3358,7 +3358,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3373,7 +3373,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3391,7 +3391,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3406,7 +3406,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3424,7 +3424,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3439,7 +3439,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3457,7 +3457,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3472,7 +3472,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3491,7 +3491,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3507,7 +3507,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3526,7 +3526,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, 
v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3542,7 +3542,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3561,7 +3561,7 @@ ; CHECK-NEXT: vmv1r.v v18, v16 ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3577,7 +3577,7 @@ ; CHECK-NEXT: vmv1r.v v18, v16 ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3596,7 +3596,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3612,7 +3612,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3632,7 +3632,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3649,7 +3649,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3669,7 +3669,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3686,7 +3686,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3706,7 +3706,7 @@ ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3723,7 +3723,7 @@ ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3743,7 +3743,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 
; CHECK-NEXT: ret entry: @@ -3760,7 +3760,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3781,7 +3781,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3799,7 +3799,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3820,7 +3820,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3838,7 +3838,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3859,7 +3859,7 @@ ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3877,7 +3877,7 @@ ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3898,7 +3898,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3916,7 +3916,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3938,7 +3938,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3957,7 +3957,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3979,7 +3979,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3998,7 +3998,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; 
CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4020,7 +4020,7 @@ ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -4039,7 +4039,7 @@ ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -4061,7 +4061,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4080,7 +4080,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4097,7 +4097,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -4111,7 +4111,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4128,7 +4128,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -4142,7 +4142,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4159,7 +4159,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -4173,7 +4173,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4190,7 +4190,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -4204,7 +4204,7 @@ ; 
CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4221,7 +4221,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4235,7 +4235,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4252,7 +4252,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4266,7 +4266,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4283,7 +4283,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4297,7 +4297,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4314,7 +4314,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4328,7 +4328,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4346,7 +4346,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4361,7 +4361,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4379,7 +4379,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4394,7 +4394,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; 
CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4412,7 +4412,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4427,7 +4427,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4445,7 +4445,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4460,7 +4460,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4479,7 +4479,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4495,7 +4495,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4514,7 +4514,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4530,7 +4530,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4549,7 +4549,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4565,7 +4565,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4584,7 +4584,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4600,7 +4600,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v 
v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4620,7 +4620,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4637,7 +4637,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4657,7 +4657,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4674,7 +4674,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4694,7 +4694,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4711,7 +4711,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4731,7 +4731,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4748,7 +4748,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4769,7 +4769,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4787,7 +4787,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4808,7 +4808,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4826,7 +4826,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4847,7 +4847,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, 
v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4865,7 +4865,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4886,7 +4886,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4904,7 +4904,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4926,7 +4926,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4945,7 +4945,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4967,7 +4967,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4986,7 +4986,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5008,7 +5008,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5027,7 +5027,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5049,7 +5049,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5068,7 +5068,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5085,7 +5085,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; 
CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -5099,7 +5099,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5116,7 +5116,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -5130,7 +5130,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5147,7 +5147,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -5161,7 +5161,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5177,7 +5177,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -5190,7 +5190,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5207,7 +5207,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5221,7 +5221,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5238,7 +5238,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5252,7 +5252,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5269,7 +5269,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: 
vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5283,7 +5283,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5300,7 +5300,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -5314,7 +5314,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5332,7 +5332,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5347,7 +5347,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5365,7 +5365,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5380,7 +5380,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5398,7 +5398,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5413,7 +5413,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5431,7 +5431,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -5446,7 +5446,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5465,7 +5465,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret 
entry: @@ -5481,7 +5481,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5500,7 +5500,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5516,7 +5516,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5535,7 +5535,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5551,7 +5551,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5570,7 +5570,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -5586,7 +5586,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5606,7 +5606,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5623,7 +5623,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5643,7 +5643,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5660,7 +5660,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5680,7 +5680,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5697,7 +5697,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli 
zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5717,7 +5717,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -5734,7 +5734,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5755,7 +5755,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5773,7 +5773,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5794,7 +5794,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5812,7 +5812,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5833,7 +5833,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5851,7 +5851,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5872,7 +5872,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -5890,7 +5890,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5912,7 +5912,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5931,7 +5931,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret 
entry: @@ -5953,7 +5953,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5972,7 +5972,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5994,7 +5994,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6013,7 +6013,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6035,7 +6035,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -6054,7 +6054,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -6070,7 +6070,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -6083,7 +6083,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -6100,7 +6100,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -6114,7 +6114,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -6130,7 +6130,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -6143,7 +6143,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -6159,7 +6159,7 @@ ; 
CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -6172,7 +6172,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -6189,7 +6189,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -6203,7 +6203,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -6220,7 +6220,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6234,7 +6234,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6251,7 +6251,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -6265,7 +6265,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -6282,7 +6282,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -6296,7 +6296,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -6314,7 +6314,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -6329,7 +6329,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -6347,7 +6347,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v 
v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6362,7 +6362,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6380,7 +6380,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -6395,7 +6395,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -6413,7 +6413,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -6428,7 +6428,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -6447,7 +6447,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -6463,7 +6463,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -6482,7 +6482,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6498,7 +6498,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6517,7 +6517,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -6533,7 +6533,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -6552,7 +6552,7 @@ ; CHECK-NEXT: vmv1r.v v18, v16 ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v16, (a0), v12 ; CHECK-NEXT: 
ret entry: @@ -6568,7 +6568,7 @@ ; CHECK-NEXT: vmv1r.v v18, v16 ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -6588,7 +6588,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -6605,7 +6605,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -6625,7 +6625,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6642,7 +6642,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6662,7 +6662,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -6679,7 +6679,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -6699,7 +6699,7 @@ ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -6716,7 +6716,7 @@ ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -6737,7 +6737,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -6755,7 +6755,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -6776,7 +6776,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6794,7 +6794,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, 
mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6815,7 +6815,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -6833,7 +6833,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -6854,7 +6854,7 @@ ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -6872,7 +6872,7 @@ ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -6894,7 +6894,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -6913,7 +6913,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -6935,7 +6935,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6954,7 +6954,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6976,7 +6976,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -6995,7 +6995,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -7017,7 +7017,7 @@ ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -7036,7 +7036,7 @@ ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -7053,7 +7053,7 @@ ; CHECK-NEXT: # kill: def $v8m4 
killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -7067,7 +7067,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -7084,7 +7084,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -7098,7 +7098,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -7115,7 +7115,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -7129,7 +7129,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -7146,7 +7146,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -7160,7 +7160,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -7176,7 +7176,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -7189,7 +7189,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -7206,7 +7206,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -7220,7 +7220,7 @@ ; CHECK-NEXT: # kill: 
def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -7236,7 +7236,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -7249,7 +7249,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -7266,7 +7266,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -7280,7 +7280,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -7297,7 +7297,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -7311,7 +7311,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -7328,7 +7328,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7342,7 +7342,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7359,7 +7359,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -7373,7 +7373,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -7390,7 +7390,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7404,7 +7404,7 @@ ; 
CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7422,7 +7422,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -7437,7 +7437,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -7455,7 +7455,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7470,7 +7470,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7488,7 +7488,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -7503,7 +7503,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -7521,7 +7521,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7536,7 +7536,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7555,7 +7555,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -7571,7 +7571,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -7590,7 +7590,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7606,7 +7606,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: 
vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7625,7 +7625,7 @@ ; CHECK-NEXT: vmv1r.v v18, v16 ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -7641,7 +7641,7 @@ ; CHECK-NEXT: vmv1r.v v18, v16 ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -7660,7 +7660,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7676,7 +7676,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7696,7 +7696,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -7713,7 +7713,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -7733,7 +7733,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7750,7 +7750,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7770,7 +7770,7 @@ ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -7787,7 +7787,7 @@ ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -7807,7 +7807,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7824,7 +7824,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7845,7 +7845,7 @@ ; CHECK-NEXT: 
vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -7863,7 +7863,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -7884,7 +7884,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7902,7 +7902,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7923,7 +7923,7 @@ ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -7941,7 +7941,7 @@ ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -7962,7 +7962,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7980,7 +7980,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8002,7 +8002,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -8021,7 +8021,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -8043,7 +8043,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8062,7 +8062,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8084,7 +8084,7 @@ ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: 
vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -8103,7 +8103,7 @@ ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -8125,7 +8125,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8144,7 +8144,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8161,7 +8161,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -8175,7 +8175,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -8192,7 +8192,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -8206,7 +8206,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -8223,7 +8223,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -8237,7 +8237,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -8254,7 +8254,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -8268,7 +8268,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -8285,7 +8285,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: 
vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8299,7 +8299,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8316,7 +8316,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8330,7 +8330,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8347,7 +8347,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8361,7 +8361,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8378,7 +8378,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8392,7 +8392,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8410,7 +8410,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8425,7 +8425,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8443,7 +8443,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8458,7 +8458,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8476,7 +8476,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; 
CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8491,7 +8491,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8509,7 +8509,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8524,7 +8524,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8543,7 +8543,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8559,7 +8559,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8578,7 +8578,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8594,7 +8594,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8613,7 +8613,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8629,7 +8629,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8648,7 +8648,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8664,7 +8664,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8684,7 +8684,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8701,7 +8701,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 
; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8721,7 +8721,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8738,7 +8738,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8758,7 +8758,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8775,7 +8775,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8795,7 +8795,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8812,7 +8812,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8833,7 +8833,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8851,7 +8851,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8872,7 +8872,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8890,7 +8890,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8911,7 +8911,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8929,7 +8929,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: 
vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8950,7 +8950,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8968,7 +8968,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8990,7 +8990,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9009,7 +9009,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9031,7 +9031,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9050,7 +9050,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9072,7 +9072,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9091,7 +9091,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9113,7 +9113,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9132,7 +9132,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9149,7 +9149,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -9163,7 +9163,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -9180,7 +9180,7 @@ ; 
CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -9194,7 +9194,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -9211,7 +9211,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -9225,7 +9225,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -9241,7 +9241,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -9254,7 +9254,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -9271,7 +9271,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9285,7 +9285,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9302,7 +9302,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9316,7 +9316,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9333,7 +9333,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9347,7 +9347,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9364,7 +9364,7 @@ 
; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -9378,7 +9378,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -9396,7 +9396,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9411,7 +9411,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9429,7 +9429,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9444,7 +9444,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9462,7 +9462,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9477,7 +9477,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9495,7 +9495,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -9510,7 +9510,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -9529,7 +9529,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9545,7 +9545,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9564,7 +9564,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; 
CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9580,7 +9580,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9599,7 +9599,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9615,7 +9615,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9634,7 +9634,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -9650,7 +9650,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -9670,7 +9670,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9687,7 +9687,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9707,7 +9707,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9724,7 +9724,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9744,7 +9744,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9761,7 +9761,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9781,7 +9781,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -9798,7 +9798,7 @@ ; 
CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -9819,7 +9819,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9837,7 +9837,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9858,7 +9858,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9876,7 +9876,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9897,7 +9897,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9915,7 +9915,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9936,7 +9936,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -9954,7 +9954,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -9976,7 +9976,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9995,7 +9995,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10017,7 +10017,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10036,7 +10036,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; 
CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10058,7 +10058,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10077,7 +10077,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10099,7 +10099,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -10118,7 +10118,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -10135,7 +10135,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -10149,7 +10149,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -10166,7 +10166,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -10180,7 +10180,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -10196,7 +10196,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -10209,7 +10209,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -10226,7 +10226,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ 
-10240,7 +10240,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -10256,7 +10256,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -10269,7 +10269,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -10286,7 +10286,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -10300,7 +10300,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -10317,7 +10317,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -10331,7 +10331,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -10348,7 +10348,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -10362,7 +10362,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -10379,7 +10379,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -10393,7 +10393,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -10409,7 +10409,7 
@@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -10422,7 +10422,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -10439,7 +10439,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10453,7 +10453,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10470,7 +10470,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10484,7 +10484,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10501,7 +10501,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10515,7 +10515,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10532,7 +10532,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -10546,7 +10546,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -10564,7 +10564,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10579,7 +10579,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10597,7 +10597,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; 
CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10612,7 +10612,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10630,7 +10630,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10645,7 +10645,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10663,7 +10663,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -10678,7 +10678,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -10697,7 +10697,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10713,7 +10713,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10732,7 +10732,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10748,7 +10748,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10767,7 +10767,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10783,7 +10783,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10802,7 +10802,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: 
vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -10818,7 +10818,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -10838,7 +10838,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10855,7 +10855,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10875,7 +10875,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10892,7 +10892,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10912,7 +10912,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10929,7 +10929,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10949,7 +10949,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -10966,7 +10966,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -10987,7 +10987,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -11005,7 +11005,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -11026,7 +11026,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret 
entry: @@ -11044,7 +11044,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -11065,7 +11065,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -11083,7 +11083,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -11104,7 +11104,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11122,7 +11122,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11144,7 +11144,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -11163,7 +11163,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -11185,7 +11185,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -11204,7 +11204,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -11226,7 +11226,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -11245,7 +11245,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -11267,7 +11267,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11286,7 +11286,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; 
CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11303,7 +11303,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -11317,7 +11317,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -11334,7 +11334,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -11348,7 +11348,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -11365,7 +11365,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -11379,7 +11379,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -11396,7 +11396,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -11410,7 +11410,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -11427,7 +11427,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11441,7 +11441,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11458,7 +11458,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v 
v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11472,7 +11472,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11489,7 +11489,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11503,7 +11503,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11520,7 +11520,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11534,7 +11534,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11552,7 +11552,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11567,7 +11567,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11585,7 +11585,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11600,7 +11600,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11618,7 +11618,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11633,7 +11633,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11651,7 +11651,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; 
CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11666,7 +11666,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11683,7 +11683,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -11697,7 +11697,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -11714,7 +11714,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -11728,7 +11728,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -11744,7 +11744,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -11757,7 +11757,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -11774,7 +11774,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -11788,7 +11788,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -11805,7 +11805,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -11819,7 +11819,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, 
a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -11836,7 +11836,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -11850,7 +11850,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -11867,7 +11867,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -11881,7 +11881,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -11898,7 +11898,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11912,7 +11912,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11929,7 +11929,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11943,7 +11943,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11960,7 +11960,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11974,7 +11974,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11991,7 +11991,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: 
vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -12005,7 +12005,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -12022,7 +12022,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12036,7 +12036,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12053,7 +12053,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12067,7 +12067,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12084,7 +12084,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12098,7 +12098,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12115,7 +12115,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12129,7 +12129,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12147,7 +12147,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12162,7 +12162,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12180,7 +12180,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: 
@@ -12195,7 +12195,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12213,7 +12213,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12228,7 +12228,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12246,7 +12246,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12261,7 +12261,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12280,7 +12280,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12296,7 +12296,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12315,7 +12315,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12331,7 +12331,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12350,7 +12350,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12366,7 +12366,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12385,7 +12385,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12401,7 +12401,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; 
CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12421,7 +12421,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12438,7 +12438,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12458,7 +12458,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12475,7 +12475,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12495,7 +12495,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12512,7 +12512,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12532,7 +12532,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12549,7 +12549,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12570,7 +12570,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12588,7 +12588,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12609,7 +12609,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12627,7 +12627,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v 
v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12648,7 +12648,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12666,7 +12666,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12687,7 +12687,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12705,7 +12705,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12727,7 +12727,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12746,7 +12746,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12768,7 +12768,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12787,7 +12787,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12809,7 +12809,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12828,7 +12828,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12850,7 +12850,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12869,7 +12869,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12886,7 +12886,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 
def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -12900,7 +12900,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -12917,7 +12917,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -12931,7 +12931,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -12948,7 +12948,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -12962,7 +12962,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -12978,7 +12978,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -12991,7 +12991,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -13008,7 +13008,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -13022,7 +13022,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13039,7 +13039,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -13053,7 +13053,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; 
CHECK-NEXT: ret entry: @@ -13070,7 +13070,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -13084,7 +13084,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13101,7 +13101,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -13115,7 +13115,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -13133,7 +13133,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -13148,7 +13148,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13166,7 +13166,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -13181,7 +13181,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13199,7 +13199,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -13214,7 +13214,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13232,7 +13232,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -13247,7 +13247,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -13266,7 +13266,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: 
vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -13282,7 +13282,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13301,7 +13301,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -13317,7 +13317,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13336,7 +13336,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -13352,7 +13352,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13371,7 +13371,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -13387,7 +13387,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -13407,7 +13407,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -13424,7 +13424,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13444,7 +13444,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -13461,7 +13461,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13481,7 +13481,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; 
CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -13498,7 +13498,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13518,7 +13518,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -13535,7 +13535,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -13556,7 +13556,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -13574,7 +13574,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13595,7 +13595,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -13613,7 +13613,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13634,7 +13634,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -13652,7 +13652,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13673,7 +13673,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -13691,7 +13691,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -13713,7 +13713,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -13732,7 +13732,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 
; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13754,7 +13754,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -13773,7 +13773,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13795,7 +13795,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -13814,7 +13814,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13836,7 +13836,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -13855,7 +13855,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -13872,7 +13872,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -13886,7 +13886,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -13903,7 +13903,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -13917,7 +13917,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -13934,7 +13934,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ 
-13948,7 +13948,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -13965,7 +13965,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -13979,7 +13979,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -13996,7 +13996,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14010,7 +14010,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14027,7 +14027,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14041,7 +14041,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14058,7 +14058,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14072,7 +14072,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14089,7 +14089,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14103,7 +14103,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14121,7 +14121,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14136,7 
+14136,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14154,7 +14154,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14169,7 +14169,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14187,7 +14187,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14202,7 +14202,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14220,7 +14220,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14235,7 +14235,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14254,7 +14254,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14270,7 +14270,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14289,7 +14289,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14305,7 +14305,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14324,7 +14324,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14340,7 +14340,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, 
v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14359,7 +14359,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14375,7 +14375,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14395,7 +14395,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14412,7 +14412,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14432,7 +14432,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14449,7 +14449,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14469,7 +14469,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14486,7 +14486,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14506,7 +14506,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14523,7 +14523,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14544,7 +14544,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14562,7 +14562,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, 
ma ; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14583,7 +14583,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14601,7 +14601,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14622,7 +14622,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14640,7 +14640,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14661,7 +14661,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14679,7 +14679,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14701,7 +14701,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14720,7 +14720,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14742,7 +14742,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14761,7 +14761,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14783,7 +14783,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14802,7 +14802,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14824,7 +14824,7 @@ 
; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14843,7 +14843,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14860,7 +14860,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -14874,7 +14874,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -14891,7 +14891,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -14905,7 +14905,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -14922,7 +14922,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -14936,7 +14936,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -14953,7 +14953,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -14967,7 +14967,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -14984,7 +14984,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14998,7 +14998,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, 
mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15015,7 +15015,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15029,7 +15029,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15046,7 +15046,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15060,7 +15060,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15077,7 +15077,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15091,7 +15091,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15109,7 +15109,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15124,7 +15124,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15142,7 +15142,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15157,7 +15157,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15175,7 +15175,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15190,7 +15190,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; 
CHECK-NEXT: ret entry: @@ -15208,7 +15208,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15223,7 +15223,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15242,7 +15242,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15258,7 +15258,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15277,7 +15277,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15293,7 +15293,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15312,7 +15312,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15328,7 +15328,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15347,7 +15347,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15363,7 +15363,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15383,7 +15383,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15400,7 +15400,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15420,7 +15420,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v 
v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15437,7 +15437,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15457,7 +15457,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15474,7 +15474,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15494,7 +15494,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15511,7 +15511,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15532,7 +15532,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15550,7 +15550,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15571,7 +15571,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15589,7 +15589,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15610,7 +15610,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15628,7 +15628,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15649,7 +15649,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; 
CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15667,7 +15667,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15689,7 +15689,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15708,7 +15708,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15730,7 +15730,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15749,7 +15749,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15771,7 +15771,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15790,7 +15790,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15812,7 +15812,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15831,7 +15831,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15848,7 +15848,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -15862,7 +15862,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -15879,7 +15879,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: 
vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -15893,7 +15893,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -15909,7 +15909,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -15922,7 +15922,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -15938,7 +15938,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -15951,7 +15951,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -15968,7 +15968,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -15982,7 +15982,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -15999,7 +15999,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -16013,7 +16013,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -16030,7 +16030,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -16044,7 +16044,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -16061,7 +16061,7 @@ ; CHECK-NEXT: 
vmv2r.v v16, v8 ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -16075,7 +16075,7 @@ ; CHECK-NEXT: vmv2r.v v16, v8 ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -16093,7 +16093,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -16108,7 +16108,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -16126,7 +16126,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -16141,7 +16141,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -16159,7 +16159,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -16174,7 +16174,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -16192,7 +16192,7 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -16207,7 +16207,7 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -16224,7 +16224,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -16238,7 +16238,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -16255,7 +16255,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed 
$v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -16269,7 +16269,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -16285,7 +16285,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -16298,7 +16298,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -16315,7 +16315,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -16329,7 +16329,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -16346,7 +16346,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -16360,7 +16360,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -16377,7 +16377,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -16391,7 +16391,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -16408,7 +16408,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret 
entry: @@ -16422,7 +16422,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -16439,7 +16439,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -16453,7 +16453,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -16470,7 +16470,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -16484,7 +16484,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -16501,7 +16501,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -16515,7 +16515,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -16532,7 +16532,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -16546,7 +16546,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -16563,7 +16563,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -16577,7 +16577,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -16595,7 +16595,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), 
v10 ; CHECK-NEXT: ret entry: @@ -16610,7 +16610,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -16628,7 +16628,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -16643,7 +16643,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -16661,7 +16661,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -16676,7 +16676,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -16694,7 +16694,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -16709,7 +16709,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -16725,7 +16725,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -16738,7 +16738,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -16755,7 +16755,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -16769,7 +16769,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -16785,7 +16785,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12 ; 
CHECK-NEXT: ret entry: @@ -16798,7 +16798,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -16815,7 +16815,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -16829,7 +16829,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -16846,7 +16846,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -16860,7 +16860,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -16877,7 +16877,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -16891,7 +16891,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -16908,7 +16908,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -16922,7 +16922,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -16939,7 +16939,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -16953,7 +16953,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -16971,7 +16971,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10 ; 
CHECK-NEXT: ret entry: @@ -16986,7 +16986,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -17004,7 +17004,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -17019,7 +17019,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -17037,7 +17037,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -17052,7 +17052,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -17070,7 +17070,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -17085,7 +17085,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -17104,7 +17104,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -17120,7 +17120,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -17139,7 +17139,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -17155,7 +17155,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -17174,7 +17174,7 @@ ; CHECK-NEXT: vmv1r.v v18, v16 ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -17190,7 +17190,7 @@ ; CHECK-NEXT: vmv1r.v v18, v16 ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v 
v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -17209,7 +17209,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -17225,7 +17225,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -17245,7 +17245,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -17262,7 +17262,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -17282,7 +17282,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -17299,7 +17299,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -17319,7 +17319,7 @@ ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -17336,7 +17336,7 @@ ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -17356,7 +17356,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -17373,7 +17373,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -17394,7 +17394,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -17412,7 +17412,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; 
CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -17433,7 +17433,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -17451,7 +17451,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -17472,7 +17472,7 @@ ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -17490,7 +17490,7 @@ ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -17511,7 +17511,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -17529,7 +17529,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -17551,7 +17551,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -17570,7 +17570,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -17592,7 +17592,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -17611,7 +17611,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -17633,7 +17633,7 @@ ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -17652,7 +17652,7 @@ ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -17674,7 +17674,7 @@ ; CHECK-NEXT: vmv1r.v 
v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -17693,7 +17693,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -17710,7 +17710,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -17724,7 +17724,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -17741,7 +17741,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -17755,7 +17755,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -17772,7 +17772,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -17786,7 +17786,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -17802,7 +17802,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -17815,7 +17815,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -17832,7 +17832,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -17846,7 +17846,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; 
CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -17863,7 +17863,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -17877,7 +17877,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -17894,7 +17894,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -17908,7 +17908,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -17925,7 +17925,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -17939,7 +17939,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -17957,7 +17957,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -17972,7 +17972,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -17990,7 +17990,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -18005,7 +18005,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -18023,7 +18023,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -18038,7 +18038,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -18056,7 +18056,7 @@ ; 
CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -18071,7 +18071,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -18090,7 +18090,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -18106,7 +18106,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -18125,7 +18125,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -18141,7 +18141,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -18160,7 +18160,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -18176,7 +18176,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -18195,7 +18195,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -18211,7 +18211,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -18231,7 +18231,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -18248,7 +18248,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -18268,7 +18268,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; 
CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -18285,7 +18285,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -18305,7 +18305,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -18322,7 +18322,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -18342,7 +18342,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -18359,7 +18359,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -18380,7 +18380,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -18398,7 +18398,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -18419,7 +18419,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -18437,7 +18437,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -18458,7 +18458,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -18476,7 +18476,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -18497,7 +18497,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; 
CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -18515,7 +18515,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -18537,7 +18537,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -18556,7 +18556,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -18578,7 +18578,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -18597,7 +18597,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -18619,7 +18619,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -18638,7 +18638,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -18660,7 +18660,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -18679,7 +18679,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -18696,7 +18696,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -18710,7 +18710,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -18727,7 +18727,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; 
CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -18741,7 +18741,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -18757,7 +18757,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -18770,7 +18770,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -18787,7 +18787,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -18801,7 +18801,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -18818,7 +18818,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -18832,7 +18832,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -18849,7 +18849,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -18863,7 +18863,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -18880,7 +18880,7 @@ ; CHECK-NEXT: vmv2r.v v16, v8 ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -18894,7 +18894,7 @@ ; CHECK-NEXT: vmv2r.v v16, v8 ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -18911,7 +18911,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: 
vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -18925,7 +18925,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -18943,7 +18943,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -18958,7 +18958,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -18976,7 +18976,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -18991,7 +18991,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -19009,7 +19009,7 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -19024,7 +19024,7 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -19042,7 +19042,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -19057,7 +19057,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll b/llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsplats-fp.ll @@ -7,7 +7,7 @@ define @vsplat_nxv8f16(half %f) { ; CHECK-LABEL: vsplat_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vfmv.v.f v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, half %f, i32 0 @@ -18,7 +18,7 @@ define @vsplat_zero_nxv8f16() { ; CHECK-LABEL: vsplat_zero_nxv8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: ret %head = insertelement poison, half zeroinitializer, 
i32 0 @@ -29,7 +29,7 @@ define @vsplat_nxv8f32(float %f) { ; CHECK-LABEL: vsplat_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vfmv.v.f v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, float %f, i32 0 @@ -40,7 +40,7 @@ define @vsplat_zero_nxv8f32() { ; CHECK-LABEL: vsplat_zero_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: ret %head = insertelement poison, float zeroinitializer, i32 0 @@ -51,7 +51,7 @@ define @vsplat_nxv8f64(double %f) { ; CHECK-LABEL: vsplat_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vfmv.v.f v8, fa0 ; CHECK-NEXT: ret %head = insertelement poison, double %f, i32 0 @@ -62,7 +62,7 @@ define @vsplat_zero_nxv8f64() { ; CHECK-LABEL: vsplat_zero_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: ret %head = insertelement poison, double zeroinitializer, i32 0 @@ -74,7 +74,7 @@ define @vsplat_load_nxv8f32(float* %ptr) { ; CHECK-LABEL: vsplat_load_nxv8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vlse32.v v8, (a0), zero ; CHECK-NEXT: ret %f = load float, float* %ptr diff --git a/llvm/test/CodeGen/RISCV/rvv/vsplats-i1.ll b/llvm/test/CodeGen/RISCV/rvv/vsplats-i1.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsplats-i1.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsplats-i1.ll @@ -5,7 +5,7 @@ define @vsplat_nxv1i1_0() { ; CHECK-LABEL: vsplat_nxv1i1_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmclr.m v0 ; CHECK-NEXT: ret %head = insertelement poison, i1 0, i32 0 @@ -16,7 +16,7 @@ define @vsplat_nxv1i1_1() { ; CHECK-LABEL: vsplat_nxv1i1_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: ret %head = insertelement poison, i1 -1, i32 0 @@ -28,7 +28,7 @@ ; CHECK-LABEL: vsplat_nxv1i1_2: ; CHECK: # %bb.0: ; CHECK-NEXT: andi a0, a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -42,7 +42,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: xor a0, a0, a1 ; CHECK-NEXT: snez a0, a0 -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -55,7 +55,7 @@ define @vsplat_nxv2i1_0() { ; CHECK-LABEL: vsplat_nxv2i1_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmclr.m v0 ; CHECK-NEXT: ret %head = insertelement poison, i1 0, i32 0 @@ -66,7 +66,7 @@ define @vsplat_nxv2i1_1() { ; CHECK-LABEL: vsplat_nxv2i1_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: ret %head = insertelement poison, i1 -1, i32 0 @@ -78,7 +78,7 @@ ; CHECK-LABEL: vsplat_nxv2i1_2: ; CHECK: # %bb.0: ; CHECK-NEXT: andi a0, a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, 
ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -90,7 +90,7 @@ define @vsplat_nxv4i1_0() { ; CHECK-LABEL: vsplat_nxv4i1_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmclr.m v0 ; CHECK-NEXT: ret %head = insertelement poison, i1 0, i32 0 @@ -101,7 +101,7 @@ define @vsplat_nxv4i1_1() { ; CHECK-LABEL: vsplat_nxv4i1_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: ret %head = insertelement poison, i1 -1, i32 0 @@ -113,7 +113,7 @@ ; CHECK-LABEL: vsplat_nxv4i1_2: ; CHECK: # %bb.0: ; CHECK-NEXT: andi a0, a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -125,7 +125,7 @@ define @vsplat_nxv8i1_0() { ; CHECK-LABEL: vsplat_nxv8i1_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmclr.m v0 ; CHECK-NEXT: ret %head = insertelement poison, i1 0, i32 0 @@ -136,7 +136,7 @@ define @vsplat_nxv8i1_1() { ; CHECK-LABEL: vsplat_nxv8i1_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: ret %head = insertelement poison, i1 -1, i32 0 @@ -148,7 +148,7 @@ ; CHECK-LABEL: vsplat_nxv8i1_2: ; CHECK: # %bb.0: ; CHECK-NEXT: andi a0, a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -160,7 +160,7 @@ define @vsplat_nxv16i1_0() { ; CHECK-LABEL: vsplat_nxv16i1_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmclr.m v0 ; CHECK-NEXT: ret %head = insertelement poison, i1 0, i32 0 @@ -171,7 +171,7 @@ define @vsplat_nxv16i1_1() { ; CHECK-LABEL: vsplat_nxv16i1_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: ret %head = insertelement poison, i1 -1, i32 0 @@ -183,7 +183,7 @@ ; CHECK-LABEL: vsplat_nxv16i1_2: ; CHECK: # %bb.0: ; CHECK-NEXT: andi a0, a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -195,14 +195,14 @@ define @splat_idx_nxv4i32( %v, i64 %idx) { ; CHECK-LABEL: splat_idx_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 -; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: vmv.x.s a0, v8 ; CHECK-NEXT: andi a0, a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll b/llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll @@ -7,7 +7,7 @@ define @vsplat_nxv8i64_1() { ; CHECK-LABEL: vsplat_nxv8i64_1: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, -1 ; CHECK-NEXT: ret %head = insertelement poison, i64 -1, i32 0 @@ -18,7 +18,7 @@ define @vsplat_nxv8i64_2() { ; CHECK-LABEL: vsplat_nxv8i64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vmv.v.i v8, 4 ; CHECK-NEXT: ret %head = insertelement poison, i64 4, i32 0 @@ -30,7 +30,7 @@ ; CHECK-LABEL: vsplat_nxv8i64_3: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 255 -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; CHECK-NEXT: vmv.v.x v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 255, i32 0 @@ -48,7 +48,7 @@ ; RV32V-NEXT: addi a0, a0, -1281 ; RV32V-NEXT: sw a0, 8(sp) ; RV32V-NEXT: addi a0, sp, 8 -; RV32V-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32V-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32V-NEXT: vlse64.v v8, (a0), zero ; RV32V-NEXT: addi sp, sp, 16 ; RV32V-NEXT: ret @@ -58,7 +58,7 @@ ; RV64V-NEXT: li a0, 251 ; RV64V-NEXT: slli a0, a0, 24 ; RV64V-NEXT: addi a0, a0, -1281 -; RV64V-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64V-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64V-NEXT: vmv.v.x v8, a0 ; RV64V-NEXT: ret %head = insertelement poison, i64 4211079935, i32 0 @@ -74,14 +74,14 @@ ; RV32V-NEXT: sw a1, 12(sp) ; RV32V-NEXT: sw a0, 8(sp) ; RV32V-NEXT: addi a0, sp, 8 -; RV32V-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32V-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32V-NEXT: vlse64.v v8, (a0), zero ; RV32V-NEXT: addi sp, sp, 16 ; RV32V-NEXT: ret ; ; RV64V-LABEL: vsplat_nxv8i64_5: ; RV64V: # %bb.0: -; RV64V-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64V-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64V-NEXT: vmv.v.x v8, a0 ; RV64V-NEXT: ret %head = insertelement poison, i64 %a, i32 0 @@ -92,7 +92,7 @@ define @vadd_vx_nxv8i64_6( %v) { ; CHECK-LABEL: vadd_vx_nxv8i64_6: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 2 ; CHECK-NEXT: ret %head = insertelement poison, i64 2, i32 0 @@ -104,7 +104,7 @@ define @vadd_vx_nxv8i64_7( %v) { ; CHECK-LABEL: vadd_vx_nxv8i64_7: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -1 ; CHECK-NEXT: ret %head = insertelement poison, i64 -1, i32 0 @@ -117,7 +117,7 @@ ; CHECK-LABEL: vadd_vx_nxv8i64_8: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 255 -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; CHECK-NEXT: vadd.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 255, i32 0 @@ -131,7 +131,7 @@ ; RV32V: # %bb.0: ; RV32V-NEXT: lui a0, 503808 ; RV32V-NEXT: addi a0, a0, -1281 -; RV32V-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32V-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32V-NEXT: vadd.vx v8, v8, a0 ; RV32V-NEXT: ret ; @@ -139,7 +139,7 @@ ; RV64V: # %bb.0: ; RV64V-NEXT: lui a0, 503808 ; RV64V-NEXT: addiw a0, a0, -1281 -; RV64V-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64V-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64V-NEXT: vadd.vx v8, v8, a0 ; RV64V-NEXT: ret %head = insertelement poison, i64 2063596287, i32 0 @@ -158,7 +158,7 @@ ; RV32V-NEXT: addi a0, a0, -1281 ; RV32V-NEXT: sw a0, 8(sp) ; RV32V-NEXT: addi a0, sp, 8 -; RV32V-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32V-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32V-NEXT: vlse64.v v16, 
(a0), zero ; RV32V-NEXT: vadd.vv v8, v8, v16 ; RV32V-NEXT: addi sp, sp, 16 @@ -169,7 +169,7 @@ ; RV64V-NEXT: li a0, 251 ; RV64V-NEXT: slli a0, a0, 24 ; RV64V-NEXT: addi a0, a0, -1281 -; RV64V-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64V-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64V-NEXT: vadd.vx v8, v8, a0 ; RV64V-NEXT: ret %head = insertelement poison, i64 4211079935, i32 0 @@ -189,7 +189,7 @@ ; RV32V-NEXT: addi a0, a0, -1281 ; RV32V-NEXT: sw a0, 8(sp) ; RV32V-NEXT: addi a0, sp, 8 -; RV32V-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32V-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32V-NEXT: vlse64.v v16, (a0), zero ; RV32V-NEXT: vadd.vv v8, v8, v16 ; RV32V-NEXT: addi sp, sp, 16 @@ -200,7 +200,7 @@ ; RV64V-NEXT: li a0, 507 ; RV64V-NEXT: slli a0, a0, 24 ; RV64V-NEXT: addi a0, a0, -1281 -; RV64V-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64V-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64V-NEXT: vadd.vx v8, v8, a0 ; RV64V-NEXT: ret %head = insertelement poison, i64 8506047231, i32 0 @@ -217,7 +217,7 @@ ; RV32V-NEXT: sw a1, 12(sp) ; RV32V-NEXT: sw a0, 8(sp) ; RV32V-NEXT: addi a0, sp, 8 -; RV32V-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32V-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32V-NEXT: vlse64.v v16, (a0), zero ; RV32V-NEXT: vadd.vv v8, v8, v16 ; RV32V-NEXT: addi sp, sp, 16 @@ -225,7 +225,7 @@ ; ; RV64V-LABEL: vadd_vx_nxv8i64_12: ; RV64V: # %bb.0: -; RV64V-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64V-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64V-NEXT: vadd.vx v8, v8, a0 ; RV64V-NEXT: ret %head = insertelement poison, i64 %a, i32 0 @@ -237,14 +237,14 @@ define @vsplat_nxv8i64_13(i32 %a) { ; RV32V-LABEL: vsplat_nxv8i64_13: ; RV32V: # %bb.0: -; RV32V-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32V-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32V-NEXT: vmv.v.x v8, a0 ; RV32V-NEXT: ret ; ; RV64V-LABEL: vsplat_nxv8i64_13: ; RV64V: # %bb.0: ; RV64V-NEXT: sext.w a0, a0 -; RV64V-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64V-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64V-NEXT: vmv.v.x v8, a0 ; RV64V-NEXT: ret %b = sext i32 %a to i64 @@ -261,7 +261,7 @@ ; RV32V-NEXT: sw zero, 12(sp) ; RV32V-NEXT: sw a0, 8(sp) ; RV32V-NEXT: addi a0, sp, 8 -; RV32V-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32V-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32V-NEXT: vlse64.v v8, (a0), zero ; RV32V-NEXT: addi sp, sp, 16 ; RV32V-NEXT: ret @@ -270,7 +270,7 @@ ; RV64V: # %bb.0: ; RV64V-NEXT: slli a0, a0, 32 ; RV64V-NEXT: srli a0, a0, 32 -; RV64V-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64V-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64V-NEXT: vmv.v.x v8, a0 ; RV64V-NEXT: ret %b = zext i32 %a to i64 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vsra_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define 
@intrinsic_vsra_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vsra_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vsra_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vsra_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vsra_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vsra_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vsra_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vsra_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vsra_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vsra_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vsra_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vsra_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vsra_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vsra_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vsra_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vsra_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vsra_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vsra_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vsra_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vsra_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vsra_vx_nxv1i8_nxv1i8( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ 
-1095,7 +1095,7 @@ define @intrinsic_vsra_vx_nxv2i8_nxv2i8( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vsra_vx_nxv4i8_nxv4i8( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vsra_vx_nxv8i8_nxv8i8( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vsra_vx_nxv16i8_nxv16i8( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define @intrinsic_vsra_vx_nxv32i8_nxv32i8( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vsra_vx_nxv64i8_nxv64i8( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vsra_vx_nxv1i16_nxv1i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vsra_vx_nxv2i16_nxv2i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vsra_vx_nxv4i16_nxv4i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define @intrinsic_vsra_vx_nxv8i16_nxv8i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vsra_vx_nxv16i16_nxv16i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; 
CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vsra_vx_nxv32i16_nxv32i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vsra_vx_nxv1i32_nxv1i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vsra_vx_nxv2i32_nxv2i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ define @intrinsic_vsra_vx_nxv4i32_nxv4i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ define @intrinsic_vsra_vx_nxv8i32_nxv8i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vsra_vx_nxv16i32_nxv16i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1894,7 +1894,7 @@ define @intrinsic_vsra_vx_nxv1i64_nxv1i64( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1941,7 +1941,7 @@ define @intrinsic_vsra_vx_nxv2i64_nxv2i64( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1988,7 +1988,7 @@ define @intrinsic_vsra_vx_nxv4i64_nxv4i64( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2035,7 +2035,7 @@ define @intrinsic_vsra_vx_nxv8i64_nxv8i64( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2076,7 +2076,7 @@ define @intrinsic_vsra_vi_nxv1i8_nxv1i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2109,7 +2109,7 @@ define @intrinsic_vsra_vi_nxv2i8_nxv2i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2142,7 +2142,7 @@ define @intrinsic_vsra_vi_nxv4i8_nxv4i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2175,7 +2175,7 @@ define @intrinsic_vsra_vi_nxv8i8_nxv8i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2208,7 +2208,7 @@ define @intrinsic_vsra_vi_nxv16i8_nxv16i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2241,7 +2241,7 @@ define @intrinsic_vsra_vi_nxv32i8_nxv32i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2274,7 +2274,7 @@ define @intrinsic_vsra_vi_nxv64i8_nxv64i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2307,7 +2307,7 @@ define @intrinsic_vsra_vi_nxv1i16_nxv1i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2340,7 +2340,7 @@ define @intrinsic_vsra_vi_nxv2i16_nxv2i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2373,7 +2373,7 @@ define @intrinsic_vsra_vi_nxv4i16_nxv4i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2406,7 +2406,7 @@ define @intrinsic_vsra_vi_nxv8i16_nxv8i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2439,7 +2439,7 @@ define @intrinsic_vsra_vi_nxv16i16_nxv16i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2472,7 +2472,7 @@ define @intrinsic_vsra_vi_nxv32i16_nxv32i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2505,7 +2505,7 @@ define @intrinsic_vsra_vi_nxv1i32_nxv1i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2538,7 +2538,7 @@ define @intrinsic_vsra_vi_nxv2i32_nxv2i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2571,7 +2571,7 @@ define @intrinsic_vsra_vi_nxv4i32_nxv4i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2604,7 +2604,7 @@ define @intrinsic_vsra_vi_nxv8i32_nxv8i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2637,7 +2637,7 @@ define @intrinsic_vsra_vi_nxv16i32_nxv16i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2670,7 +2670,7 @@ define @intrinsic_vsra_vi_nxv1i64_nxv1i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2703,7 +2703,7 @@ define @intrinsic_vsra_vi_nxv2i64_nxv2i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2736,7 +2736,7 @@ define @intrinsic_vsra_vi_nxv4i64_nxv4i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2769,7 +2769,7 @@ define @intrinsic_vsra_vi_nxv8i64_nxv8i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vsra_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vsra_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vsra_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vsra_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vsra_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vsra_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vsra_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vsra_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vsra_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vsra_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; 
CHECK-LABEL: intrinsic_vsra_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vsra_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vsra_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vsra_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vsra_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vsra_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vsra_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vsra_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vsra_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vsra_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vsra_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; 
CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vsra_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vsra_vx_nxv1i8_nxv1i8( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1095,7 +1095,7 @@ define @intrinsic_vsra_vx_nxv2i8_nxv2i8( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vsra_vx_nxv4i8_nxv4i8( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vsra_vx_nxv8i8_nxv8i8( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vsra_vx_nxv16i8_nxv16i8( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define @intrinsic_vsra_vx_nxv32i8_nxv32i8( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vsra_vx_nxv64i8_nxv64i8( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vsra_vx_nxv1i16_nxv1i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vsra_vx_nxv2i16_nxv2i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vsra_vx_nxv4i16_nxv4i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define @intrinsic_vsra_vx_nxv8i16_nxv8i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vsra_vx_nxv16i16_nxv16i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vsra_vx_nxv32i16_nxv32i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vsra_vx_nxv1i32_nxv1i32( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vsra_vx_nxv2i32_nxv2i32( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ define @intrinsic_vsra_vx_nxv4i32_nxv4i32( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ define @intrinsic_vsra_vx_nxv8i32_nxv8i32( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vsra_vx_nxv16i32_nxv16i32( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1894,7 +1894,7 @@ define @intrinsic_vsra_vx_nxv1i64_nxv1i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1941,7 +1941,7 @@ define @intrinsic_vsra_vx_nxv2i64_nxv2i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1988,7 +1988,7 @@ define @intrinsic_vsra_vx_nxv4i64_nxv4i64( %0, i64 %1, i64 %2) 
nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2035,7 +2035,7 @@ define @intrinsic_vsra_vx_nxv8i64_nxv8i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsra_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2076,7 +2076,7 @@ define @intrinsic_vsra_vi_nxv1i8_nxv1i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2109,7 +2109,7 @@ define @intrinsic_vsra_vi_nxv2i8_nxv2i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2142,7 +2142,7 @@ define @intrinsic_vsra_vi_nxv4i8_nxv4i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2175,7 +2175,7 @@ define @intrinsic_vsra_vi_nxv8i8_nxv8i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2208,7 +2208,7 @@ define @intrinsic_vsra_vi_nxv16i8_nxv16i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2241,7 +2241,7 @@ define @intrinsic_vsra_vi_nxv32i8_nxv32i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2274,7 +2274,7 @@ define @intrinsic_vsra_vi_nxv64i8_nxv64i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2307,7 +2307,7 @@ define @intrinsic_vsra_vi_nxv1i16_nxv1i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2340,7 +2340,7 @@ define @intrinsic_vsra_vi_nxv2i16_nxv2i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2373,7 +2373,7 @@ define 
@intrinsic_vsra_vi_nxv4i16_nxv4i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2406,7 +2406,7 @@ define @intrinsic_vsra_vi_nxv8i16_nxv8i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2439,7 +2439,7 @@ define @intrinsic_vsra_vi_nxv16i16_nxv16i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2472,7 +2472,7 @@ define @intrinsic_vsra_vi_nxv32i16_nxv32i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2505,7 +2505,7 @@ define @intrinsic_vsra_vi_nxv1i32_nxv1i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2538,7 +2538,7 @@ define @intrinsic_vsra_vi_nxv2i32_nxv2i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2571,7 +2571,7 @@ define @intrinsic_vsra_vi_nxv4i32_nxv4i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2604,7 +2604,7 @@ define @intrinsic_vsra_vi_nxv8i32_nxv8i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2637,7 +2637,7 @@ define @intrinsic_vsra_vi_nxv16i32_nxv16i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2670,7 +2670,7 @@ define @intrinsic_vsra_vi_nxv1i64_nxv1i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2703,7 +2703,7 @@ define @intrinsic_vsra_vi_nxv2i64_nxv2i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: 
vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2736,7 +2736,7 @@ define @intrinsic_vsra_vi_nxv4i64_nxv4i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2769,7 +2769,7 @@ define @intrinsic_vsra_vi_nxv8i64_nxv8i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsra_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode.ll @@ -5,7 +5,7 @@ define @vsra_vv_nxv1i8( %va, %vb) { ; CHECK-LABEL: vsra_vv_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = ashr %va, %vb @@ -15,7 +15,7 @@ define @vsra_vx_nxv1i8( %va, i8 signext %b) { ; CHECK-LABEL: vsra_vx_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -27,7 +27,7 @@ define @vsra_vi_nxv1i8_0( %va) { ; CHECK-LABEL: vsra_vi_nxv1i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i8 6, i32 0 @@ -39,7 +39,7 @@ define @vsra_vv_nxv2i8( %va, %vb) { ; CHECK-LABEL: vsra_vv_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = ashr %va, %vb @@ -49,7 +49,7 @@ define @vsra_vx_nxv2i8( %va, i8 signext %b) { ; CHECK-LABEL: vsra_vx_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -61,7 +61,7 @@ define @vsra_vi_nxv2i8_0( %va) { ; CHECK-LABEL: vsra_vi_nxv2i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i8 6, i32 0 @@ -73,7 +73,7 @@ define @vsra_vv_nxv4i8( %va, %vb) { ; CHECK-LABEL: vsra_vv_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = ashr %va, %vb @@ -83,7 +83,7 @@ define @vsra_vx_nxv4i8( %va, i8 signext %b) { ; CHECK-LABEL: vsra_vx_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -95,7 +95,7 @@ define @vsra_vi_nxv4i8_0( %va) { ; CHECK-LABEL: vsra_vi_nxv4i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i8 6, i32 0 @@ -107,7 +107,7 @@ define @vsra_vv_nxv8i8( %va, 
%vb) { ; CHECK-LABEL: vsra_vv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = ashr %va, %vb @@ -117,7 +117,7 @@ define @vsra_vx_nxv8i8( %va, i8 signext %b) { ; CHECK-LABEL: vsra_vx_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -129,7 +129,7 @@ define @vsra_vi_nxv8i8_0( %va) { ; CHECK-LABEL: vsra_vi_nxv8i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i8 6, i32 0 @@ -141,7 +141,7 @@ define @vsra_vv_nxv16i8( %va, %vb) { ; CHECK-LABEL: vsra_vv_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = ashr %va, %vb @@ -151,7 +151,7 @@ define @vsra_vx_nxv16i8( %va, i8 signext %b) { ; CHECK-LABEL: vsra_vx_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -163,7 +163,7 @@ define @vsra_vi_nxv16i8_0( %va) { ; CHECK-LABEL: vsra_vi_nxv16i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i8 6, i32 0 @@ -175,7 +175,7 @@ define @vsra_vv_nxv32i8( %va, %vb) { ; CHECK-LABEL: vsra_vv_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = ashr %va, %vb @@ -185,7 +185,7 @@ define @vsra_vx_nxv32i8( %va, i8 signext %b) { ; CHECK-LABEL: vsra_vx_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -197,7 +197,7 @@ define @vsra_vi_nxv32i8_0( %va) { ; CHECK-LABEL: vsra_vi_nxv32i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i8 6, i32 0 @@ -209,7 +209,7 @@ define @vsra_vv_nxv64i8( %va, %vb) { ; CHECK-LABEL: vsra_vv_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = ashr %va, %vb @@ -219,7 +219,7 @@ define @vsra_vx_nxv64i8( %va, i8 signext %b) { ; CHECK-LABEL: vsra_vx_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -231,7 +231,7 @@ define @vsra_vi_nxv64i8_0( %va) { ; CHECK-LABEL: vsra_vi_nxv64i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i8 6, i32 0 @@ -243,7 +243,7 @@ define @vsra_vv_nxv1i16( %va, %vb) { ; CHECK-LABEL: vsra_vv_nxv1i16: ; CHECK: # %bb.0: 
-; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = ashr %va, %vb @@ -253,7 +253,7 @@ define @vsra_vx_nxv1i16( %va, i16 signext %b) { ; CHECK-LABEL: vsra_vx_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -265,7 +265,7 @@ define @vsra_vi_nxv1i16_0( %va) { ; CHECK-LABEL: vsra_vi_nxv1i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i16 6, i32 0 @@ -277,7 +277,7 @@ define @vsra_vv_nxv2i16( %va, %vb) { ; CHECK-LABEL: vsra_vv_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = ashr %va, %vb @@ -287,7 +287,7 @@ define @vsra_vx_nxv2i16( %va, i16 signext %b) { ; CHECK-LABEL: vsra_vx_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -299,7 +299,7 @@ define @vsra_vi_nxv2i16_0( %va) { ; CHECK-LABEL: vsra_vi_nxv2i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i16 6, i32 0 @@ -311,7 +311,7 @@ define @vsra_vv_nxv4i16( %va, %vb) { ; CHECK-LABEL: vsra_vv_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = ashr %va, %vb @@ -321,7 +321,7 @@ define @vsra_vx_nxv4i16( %va, i16 signext %b) { ; CHECK-LABEL: vsra_vx_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -333,7 +333,7 @@ define @vsra_vi_nxv4i16_0( %va) { ; CHECK-LABEL: vsra_vi_nxv4i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i16 6, i32 0 @@ -345,7 +345,7 @@ define @vsra_vv_nxv8i16( %va, %vb) { ; CHECK-LABEL: vsra_vv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = ashr %va, %vb @@ -355,7 +355,7 @@ define @vsra_vx_nxv8i16( %va, i16 signext %b) { ; CHECK-LABEL: vsra_vx_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -367,7 +367,7 @@ define @vsra_vi_nxv8i16_0( %va) { ; CHECK-LABEL: vsra_vi_nxv8i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i16 6, i32 0 @@ -379,7 +379,7 @@ define @vsra_vv_nxv16i16( %va, %vb) { ; CHECK-LABEL: vsra_vv_nxv16i16: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = ashr %va, %vb @@ -389,7 +389,7 @@ define @vsra_vx_nxv16i16( %va, i16 signext %b) { ; CHECK-LABEL: vsra_vx_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -401,7 +401,7 @@ define @vsra_vi_nxv16i16_0( %va) { ; CHECK-LABEL: vsra_vi_nxv16i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i16 6, i32 0 @@ -413,7 +413,7 @@ define @vsra_vv_nxv32i16( %va, %vb) { ; CHECK-LABEL: vsra_vv_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = ashr %va, %vb @@ -423,7 +423,7 @@ define @vsra_vx_nxv32i16( %va, i16 signext %b) { ; CHECK-LABEL: vsra_vx_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -435,7 +435,7 @@ define @vsra_vi_nxv32i16_0( %va) { ; CHECK-LABEL: vsra_vi_nxv32i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i16 6, i32 0 @@ -447,7 +447,7 @@ define @vsra_vv_nxv1i32( %va, %vb) { ; CHECK-LABEL: vsra_vv_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = ashr %va, %vb @@ -457,7 +457,7 @@ define @vsra_vx_nxv1i32( %va, i32 signext %b) { ; CHECK-LABEL: vsra_vx_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -469,7 +469,7 @@ define @vsra_vi_nxv1i32_0( %va) { ; CHECK-LABEL: vsra_vi_nxv1i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 31 ; CHECK-NEXT: ret %head = insertelement poison, i32 31, i32 0 @@ -481,7 +481,7 @@ define @vsra_vv_nxv2i32( %va, %vb) { ; CHECK-LABEL: vsra_vv_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = ashr %va, %vb @@ -491,7 +491,7 @@ define @vsra_vx_nxv2i32( %va, i32 signext %b) { ; CHECK-LABEL: vsra_vx_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -503,7 +503,7 @@ define @vsra_vi_nxv2i32_0( %va) { ; CHECK-LABEL: vsra_vi_nxv2i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 31 ; CHECK-NEXT: ret %head = insertelement poison, i32 31, i32 0 @@ -515,7 +515,7 @@ define @vsra_vv_nxv4i32( %va, %vb) { ; CHECK-LABEL: vsra_vv_nxv4i32: ; CHECK: # %bb.0: 
-; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = ashr %va, %vb @@ -525,7 +525,7 @@ define @vsra_vx_nxv4i32( %va, i32 signext %b) { ; CHECK-LABEL: vsra_vx_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -537,7 +537,7 @@ define @vsra_vi_nxv4i32_0( %va) { ; CHECK-LABEL: vsra_vi_nxv4i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 31 ; CHECK-NEXT: ret %head = insertelement poison, i32 31, i32 0 @@ -549,7 +549,7 @@ define @vsra_vv_nxv8i32( %va, %vb) { ; CHECK-LABEL: vsra_vv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = ashr %va, %vb @@ -559,7 +559,7 @@ define @vsra_vx_nxv8i32( %va, i32 signext %b) { ; CHECK-LABEL: vsra_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -571,7 +571,7 @@ define @vsra_vi_nxv8i32_0( %va) { ; CHECK-LABEL: vsra_vi_nxv8i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 31 ; CHECK-NEXT: ret %head = insertelement poison, i32 31, i32 0 @@ -583,7 +583,7 @@ define @vsra_vv_nxv16i32( %va, %vb) { ; CHECK-LABEL: vsra_vv_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = ashr %va, %vb @@ -593,7 +593,7 @@ define @vsra_vx_nxv16i32( %va, i32 signext %b) { ; CHECK-LABEL: vsra_vx_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -605,7 +605,7 @@ define @vsra_vi_nxv16i32_0( %va) { ; CHECK-LABEL: vsra_vi_nxv16i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 31 ; CHECK-NEXT: ret %head = insertelement poison, i32 31, i32 0 @@ -617,7 +617,7 @@ define @vsra_vv_nxv1i64( %va, %vb) { ; CHECK-LABEL: vsra_vv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = ashr %va, %vb @@ -627,7 +627,7 @@ define @vsra_vx_nxv1i64( %va, i64 %b) { ; CHECK-LABEL: vsra_vx_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -639,7 +639,7 @@ define @vsra_vi_nxv1i64_0( %va) { ; CHECK-LABEL: vsra_vi_nxv1i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 31 ; CHECK-NEXT: ret %head = insertelement poison, i64 31, i32 0 @@ -652,7 +652,7 @@ ; CHECK-LABEL: vsra_vi_nxv1i64_1: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: 
vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 32, i32 0 @@ -664,7 +664,7 @@ define @vsra_vv_nxv2i64( %va, %vb) { ; CHECK-LABEL: vsra_vv_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = ashr %va, %vb @@ -674,7 +674,7 @@ define @vsra_vx_nxv2i64( %va, i64 %b) { ; CHECK-LABEL: vsra_vx_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -686,7 +686,7 @@ define @vsra_vi_nxv2i64_0( %va) { ; CHECK-LABEL: vsra_vi_nxv2i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 31 ; CHECK-NEXT: ret %head = insertelement poison, i64 31, i32 0 @@ -699,7 +699,7 @@ ; CHECK-LABEL: vsra_vi_nxv2i64_1: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 32, i32 0 @@ -711,7 +711,7 @@ define @vsra_vv_nxv4i64( %va, %vb) { ; CHECK-LABEL: vsra_vv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = ashr %va, %vb @@ -721,7 +721,7 @@ define @vsra_vx_nxv4i64( %va, i64 %b) { ; CHECK-LABEL: vsra_vx_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -733,7 +733,7 @@ define @vsra_vi_nxv4i64_0( %va) { ; CHECK-LABEL: vsra_vi_nxv4i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 31 ; CHECK-NEXT: ret %head = insertelement poison, i64 31, i32 0 @@ -746,7 +746,7 @@ ; CHECK-LABEL: vsra_vi_nxv4i64_1: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 32, i32 0 @@ -758,7 +758,7 @@ define @vsra_vv_nxv8i64( %va, %vb) { ; CHECK-LABEL: vsra_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = ashr %va, %vb @@ -768,7 +768,7 @@ define @vsra_vx_nxv8i64( %va, i64 %b) { ; CHECK-LABEL: vsra_vx_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -780,7 +780,7 @@ define @vsra_vi_nxv8i64_0( %va) { ; CHECK-LABEL: vsra_vi_nxv8i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 31 ; CHECK-NEXT: ret %head = insertelement poison, i64 31, i32 0 @@ -793,7 +793,7 @@ ; CHECK-LABEL: vsra_vi_nxv8i64_1: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: 
vsetvli a1, zero, e64, m8, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 32, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll @@ -9,7 +9,7 @@ define @vsra_vx_nxv8i7( %a, i7 signext %b, %mask, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vx_nxv8i7: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v8 ; CHECK-NEXT: vsra.vi v8, v8, 1 ; CHECK-NEXT: vmv.v.x v9, a0 @@ -39,7 +39,7 @@ define @vsra_vv_nxv1i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv1i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -63,7 +63,7 @@ define @vsra_vx_nxv1i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vx_nxv1i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -89,7 +89,7 @@ define @vsra_vi_nxv1i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_nxv1i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 5, i32 0 @@ -115,7 +115,7 @@ define @vsra_vv_nxv2i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -139,7 +139,7 @@ define @vsra_vx_nxv2i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vx_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -165,7 +165,7 @@ define @vsra_vi_nxv2i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 5, i32 0 @@ -191,7 +191,7 @@ define @vsra_vv_nxv4i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -215,7 +215,7 @@ define @vsra_vx_nxv4i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vx_nxv4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -241,7 +241,7 @@ define @vsra_vi_nxv4i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_nxv4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; 
CHECK-NEXT: vsra.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 5, i32 0 @@ -267,7 +267,7 @@ define @vsra_vv_nxv8i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -291,7 +291,7 @@ define @vsra_vx_nxv8i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vx_nxv8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -317,7 +317,7 @@ define @vsra_vi_nxv8i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_nxv8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 5, i32 0 @@ -343,7 +343,7 @@ define @vsra_vv_nxv16i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -367,7 +367,7 @@ define @vsra_vx_nxv16i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vx_nxv16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -393,7 +393,7 @@ define @vsra_vi_nxv16i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_nxv16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 5, i32 0 @@ -419,7 +419,7 @@ define @vsra_vv_nxv32i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv32i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -443,7 +443,7 @@ define @vsra_vx_nxv32i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vx_nxv32i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -469,7 +469,7 @@ define @vsra_vi_nxv32i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_nxv32i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 5, i32 0 @@ -495,7 +495,7 @@ define @vsra_vv_nxv64i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv64i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -519,7 +519,7 @@ define @vsra_vx_nxv64i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: 
vsra_vx_nxv64i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -545,7 +545,7 @@ define @vsra_vi_nxv64i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_nxv64i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 5, i32 0 @@ -571,7 +571,7 @@ define @vsra_vv_nxv1i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv1i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -595,7 +595,7 @@ define @vsra_vx_nxv1i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vx_nxv1i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -621,7 +621,7 @@ define @vsra_vi_nxv1i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_nxv1i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 5, i32 0 @@ -647,7 +647,7 @@ define @vsra_vv_nxv2i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -671,7 +671,7 @@ define @vsra_vx_nxv2i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vx_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -697,7 +697,7 @@ define @vsra_vi_nxv2i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 5, i32 0 @@ -723,7 +723,7 @@ define @vsra_vv_nxv4i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -747,7 +747,7 @@ define @vsra_vx_nxv4i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vx_nxv4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -773,7 +773,7 @@ define @vsra_vi_nxv4i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_nxv4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 5 ; 
CHECK-NEXT: ret %elt.head = insertelement poison, i16 5, i32 0 @@ -799,7 +799,7 @@ define @vsra_vv_nxv8i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -823,7 +823,7 @@ define @vsra_vx_nxv8i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vx_nxv8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -849,7 +849,7 @@ define @vsra_vi_nxv8i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_nxv8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 5, i32 0 @@ -875,7 +875,7 @@ define @vsra_vv_nxv16i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -899,7 +899,7 @@ define @vsra_vx_nxv16i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vx_nxv16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -925,7 +925,7 @@ define @vsra_vi_nxv16i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_nxv16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 5, i32 0 @@ -951,7 +951,7 @@ define @vsra_vv_nxv32i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv32i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -975,7 +975,7 @@ define @vsra_vx_nxv32i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vx_nxv32i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -1001,7 +1001,7 @@ define @vsra_vi_nxv32i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_nxv32i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 5, i32 0 @@ -1027,7 +1027,7 @@ define @vsra_vv_nxv1i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv1i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1051,7 +1051,7 @@ define @vsra_vx_nxv1i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; 
CHECK-LABEL: vsra_vx_nxv1i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1077,7 +1077,7 @@ define @vsra_vi_nxv1i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_nxv1i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 5, i32 0 @@ -1103,7 +1103,7 @@ define @vsra_vv_nxv2i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1127,7 +1127,7 @@ define @vsra_vx_nxv2i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vx_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1153,7 +1153,7 @@ define @vsra_vi_nxv2i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 5, i32 0 @@ -1179,7 +1179,7 @@ define @vsra_vv_nxv4i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1203,7 +1203,7 @@ define @vsra_vx_nxv4i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vx_nxv4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1229,7 +1229,7 @@ define @vsra_vi_nxv4i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_nxv4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 5, i32 0 @@ -1255,7 +1255,7 @@ define @vsra_vv_nxv8i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1279,7 +1279,7 @@ define @vsra_vx_nxv8i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vx_nxv8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1305,7 +1305,7 @@ define @vsra_vi_nxv8i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_nxv8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; 
CHECK-NEXT: vsra.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 5, i32 0 @@ -1331,7 +1331,7 @@ define @vsra_vv_nxv16i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1355,7 +1355,7 @@ define @vsra_vx_nxv16i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vx_nxv16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsra.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1381,7 +1381,7 @@ define @vsra_vi_nxv16i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_nxv16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 5, i32 0 @@ -1407,7 +1407,7 @@ define @vsra_vv_nxv1i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv1i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1437,13 +1437,13 @@ define @vsra_vx_nxv1i64_unmasked( %va, i64 %b, i32 zeroext %evl) { ; RV32-LABEL: vsra_vx_nxv1i64_unmasked: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vsra.vx v8, v8, a0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsra_vx_nxv1i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vsra.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1469,7 +1469,7 @@ define @vsra_vi_nxv1i64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_nxv1i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 5, i32 0 @@ -1495,7 +1495,7 @@ define @vsra_vv_nxv2i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1525,13 +1525,13 @@ define @vsra_vx_nxv2i64_unmasked( %va, i64 %b, i32 zeroext %evl) { ; RV32-LABEL: vsra_vx_nxv2i64_unmasked: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vsra.vx v8, v8, a0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsra_vx_nxv2i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vsra.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1557,7 +1557,7 @@ define @vsra_vi_nxv2i64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 5, i32 
0 @@ -1583,7 +1583,7 @@ define @vsra_vv_nxv4i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1613,13 +1613,13 @@ define @vsra_vx_nxv4i64_unmasked( %va, i64 %b, i32 zeroext %evl) { ; RV32-LABEL: vsra_vx_nxv4i64_unmasked: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vsra.vx v8, v8, a0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsra_vx_nxv4i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vsra.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1645,7 +1645,7 @@ define @vsra_vi_nxv4i64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_nxv4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 5, i32 0 @@ -1683,7 +1683,7 @@ define @vsra_vv_nxv8i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vv_nxv8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsra.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1713,13 +1713,13 @@ define @vsra_vx_nxv8i64_unmasked( %va, i64 %b, i32 zeroext %evl) { ; RV32-LABEL: vsra_vx_nxv8i64_unmasked: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vsra.vx v8, v8, a0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsra_vx_nxv8i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsra.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1745,7 +1745,7 @@ define @vsra_vi_nxv8i64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsra_vi_nxv8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsra.vi v8, v8, 5 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 5, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vsrl_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vsrl_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, 
e8, mf2, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vsrl_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vsrl_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vsrl_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vsrl_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vsrl_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vsrl_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vsrl_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vsrl_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vsrl_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vsrl_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vsrl_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 
%2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vsrl_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vsrl_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vsrl_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vsrl_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vsrl_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vsrl_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vsrl_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vsrl_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vsrl_vx_nxv1i8_nxv1i8( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1095,7 +1095,7 @@ define @intrinsic_vsrl_vx_nxv2i8_nxv2i8( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli 
zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vsrl_vx_nxv4i8_nxv4i8( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vsrl_vx_nxv8i8_nxv8i8( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vsrl_vx_nxv16i8_nxv16i8( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define @intrinsic_vsrl_vx_nxv32i8_nxv32i8( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vsrl_vx_nxv64i8_nxv64i8( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vsrl_vx_nxv1i16_nxv1i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vsrl_vx_nxv2i16_nxv2i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vsrl_vx_nxv4i16_nxv4i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define @intrinsic_vsrl_vx_nxv8i16_nxv8i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vsrl_vx_nxv16i16_nxv16i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vsrl_vx_nxv32i16_nxv32i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vsrl_vx_nxv1i32_nxv1i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vsrl_vx_nxv2i32_nxv2i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ define @intrinsic_vsrl_vx_nxv4i32_nxv4i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ define @intrinsic_vsrl_vx_nxv8i32_nxv8i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vsrl_vx_nxv16i32_nxv16i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1894,7 +1894,7 @@ define @intrinsic_vsrl_vx_nxv1i64_nxv1i64( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1941,7 +1941,7 @@ define @intrinsic_vsrl_vx_nxv2i64_nxv2i64( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1988,7 +1988,7 @@ define @intrinsic_vsrl_vx_nxv4i64_nxv4i64( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2035,7 +2035,7 @@ define @intrinsic_vsrl_vx_nxv8i64_nxv8i64( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2076,7 +2076,7 @@ define @intrinsic_vsrl_vi_nxv1i8_nxv1i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2109,7 +2109,7 @@ define @intrinsic_vsrl_vi_nxv2i8_nxv2i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: 
intrinsic_vsrl_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2142,7 +2142,7 @@ define @intrinsic_vsrl_vi_nxv4i8_nxv4i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2175,7 +2175,7 @@ define @intrinsic_vsrl_vi_nxv8i8_nxv8i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2208,7 +2208,7 @@ define @intrinsic_vsrl_vi_nxv16i8_nxv16i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2241,7 +2241,7 @@ define @intrinsic_vsrl_vi_nxv32i8_nxv32i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2274,7 +2274,7 @@ define @intrinsic_vsrl_vi_nxv64i8_nxv64i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2307,7 +2307,7 @@ define @intrinsic_vsrl_vi_nxv1i16_nxv1i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2340,7 +2340,7 @@ define @intrinsic_vsrl_vi_nxv2i16_nxv2i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2373,7 +2373,7 @@ define @intrinsic_vsrl_vi_nxv4i16_nxv4i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2406,7 +2406,7 @@ define @intrinsic_vsrl_vi_nxv8i16_nxv8i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2439,7 +2439,7 @@ define @intrinsic_vsrl_vi_nxv16i16_nxv16i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2472,7 +2472,7 @@ define @intrinsic_vsrl_vi_nxv32i16_nxv32i16_i16( %0, 
i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2505,7 +2505,7 @@ define @intrinsic_vsrl_vi_nxv1i32_nxv1i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2538,7 +2538,7 @@ define @intrinsic_vsrl_vi_nxv2i32_nxv2i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2571,7 +2571,7 @@ define @intrinsic_vsrl_vi_nxv4i32_nxv4i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2604,7 +2604,7 @@ define @intrinsic_vsrl_vi_nxv8i32_nxv8i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2637,7 +2637,7 @@ define @intrinsic_vsrl_vi_nxv16i32_nxv16i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2670,7 +2670,7 @@ define @intrinsic_vsrl_vi_nxv1i64_nxv1i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2703,7 +2703,7 @@ define @intrinsic_vsrl_vi_nxv2i64_nxv2i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2736,7 +2736,7 @@ define @intrinsic_vsrl_vi_nxv4i64_nxv4i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2769,7 +2769,7 @@ define @intrinsic_vsrl_vi_nxv8i64_nxv8i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8: 
; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vsrl_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vsrl_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vsrl_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vsrl_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vsrl_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vsrl_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vsrl_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vsrl_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vsrl_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vsrl_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v10 ; CHECK-NEXT: ret entry: 
@@ -528,7 +528,7 @@ define @intrinsic_vsrl_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vsrl_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vsrl_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vsrl_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vsrl_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vsrl_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vsrl_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vsrl_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vsrl_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vsrl_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vsrl_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vsrl_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vsrl_vx_nxv1i8_nxv1i8( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1095,7 +1095,7 @@ define @intrinsic_vsrl_vx_nxv2i8_nxv2i8( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vsrl_vx_nxv4i8_nxv4i8( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vsrl_vx_nxv8i8_nxv8i8( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vsrl_vx_nxv16i8_nxv16i8( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define @intrinsic_vsrl_vx_nxv32i8_nxv32i8( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vsrl_vx_nxv64i8_nxv64i8( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vsrl_vx_nxv1i16_nxv1i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vsrl_vx_nxv2i16_nxv2i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vsrl_vx_nxv4i16_nxv4i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define 
@intrinsic_vsrl_vx_nxv8i16_nxv8i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vsrl_vx_nxv16i16_nxv16i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vsrl_vx_nxv32i16_nxv32i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vsrl_vx_nxv1i32_nxv1i32( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vsrl_vx_nxv2i32_nxv2i32( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ define @intrinsic_vsrl_vx_nxv4i32_nxv4i32( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ define @intrinsic_vsrl_vx_nxv8i32_nxv8i32( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vsrl_vx_nxv16i32_nxv16i32( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1894,7 +1894,7 @@ define @intrinsic_vsrl_vx_nxv1i64_nxv1i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1941,7 +1941,7 @@ define @intrinsic_vsrl_vx_nxv2i64_nxv2i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1988,7 +1988,7 @@ define @intrinsic_vsrl_vx_nxv4i64_nxv4i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; 
CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2035,7 +2035,7 @@ define @intrinsic_vsrl_vx_nxv8i64_nxv8i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2076,7 +2076,7 @@ define @intrinsic_vsrl_vi_nxv1i8_nxv1i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2109,7 +2109,7 @@ define @intrinsic_vsrl_vi_nxv2i8_nxv2i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2142,7 +2142,7 @@ define @intrinsic_vsrl_vi_nxv4i8_nxv4i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2175,7 +2175,7 @@ define @intrinsic_vsrl_vi_nxv8i8_nxv8i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2208,7 +2208,7 @@ define @intrinsic_vsrl_vi_nxv16i8_nxv16i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2241,7 +2241,7 @@ define @intrinsic_vsrl_vi_nxv32i8_nxv32i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2274,7 +2274,7 @@ define @intrinsic_vsrl_vi_nxv64i8_nxv64i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2307,7 +2307,7 @@ define @intrinsic_vsrl_vi_nxv1i16_nxv1i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2340,7 +2340,7 @@ define @intrinsic_vsrl_vi_nxv2i16_nxv2i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2373,7 +2373,7 @@ define @intrinsic_vsrl_vi_nxv4i16_nxv4i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, 
a0, e16, m1, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2406,7 +2406,7 @@ define @intrinsic_vsrl_vi_nxv8i16_nxv8i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2439,7 +2439,7 @@ define @intrinsic_vsrl_vi_nxv16i16_nxv16i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2472,7 +2472,7 @@ define @intrinsic_vsrl_vi_nxv32i16_nxv32i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2505,7 +2505,7 @@ define @intrinsic_vsrl_vi_nxv1i32_nxv1i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2538,7 +2538,7 @@ define @intrinsic_vsrl_vi_nxv2i32_nxv2i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2571,7 +2571,7 @@ define @intrinsic_vsrl_vi_nxv4i32_nxv4i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2604,7 +2604,7 @@ define @intrinsic_vsrl_vi_nxv8i32_nxv8i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2637,7 +2637,7 @@ define @intrinsic_vsrl_vi_nxv16i32_nxv16i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2670,7 +2670,7 @@ define @intrinsic_vsrl_vi_nxv1i64_nxv1i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2703,7 +2703,7 @@ define @intrinsic_vsrl_vi_nxv2i64_nxv2i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2736,7 +2736,7 @@ define @intrinsic_vsrl_vi_nxv4i64_nxv4i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2769,7 +2769,7 @@ define @intrinsic_vsrl_vi_nxv8i64_nxv8i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vsrl_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode.ll @@ -5,7 +5,7 @@ define @vsrl_vx_nxv1i8( %va, i8 signext %b) { ; CHECK-LABEL: vsrl_vx_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -17,7 +17,7 @@ define @vsrl_vx_nxv1i8_0( %va) { ; CHECK-LABEL: vsrl_vx_nxv1i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i8 6, i32 0 @@ -29,7 +29,7 @@ define @vsrl_vx_nxv2i8( %va, i8 signext %b) { ; CHECK-LABEL: vsrl_vx_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -41,7 +41,7 @@ define @vsrl_vx_nxv2i8_0( %va) { ; CHECK-LABEL: vsrl_vx_nxv2i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i8 6, i32 0 @@ -53,7 +53,7 @@ define @vsrl_vx_nxv4i8( %va, i8 signext %b) { ; CHECK-LABEL: vsrl_vx_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -65,7 +65,7 @@ define @vsrl_vx_nxv4i8_0( %va) { ; CHECK-LABEL: vsrl_vx_nxv4i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i8 6, i32 0 @@ -77,7 +77,7 @@ define @vsrl_vx_nxv8i8( %va, i8 signext %b) { ; CHECK-LABEL: vsrl_vx_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -89,7 +89,7 @@ define @vsrl_vx_nxv8i8_0( %va) { ; CHECK-LABEL: vsrl_vx_nxv8i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i8 6, i32 0 @@ -101,7 +101,7 @@ define @vsrl_vx_nxv16i8( %va, i8 signext %b) { ; CHECK-LABEL: vsrl_vx_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -113,7 +113,7 @@ define @vsrl_vx_nxv16i8_0( %va) { ; CHECK-LABEL: vsrl_vx_nxv16i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, 
e8, m2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i8 6, i32 0 @@ -125,7 +125,7 @@ define @vsrl_vx_nxv32i8( %va, i8 signext %b) { ; CHECK-LABEL: vsrl_vx_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -137,7 +137,7 @@ define @vsrl_vx_nxv32i8_0( %va) { ; CHECK-LABEL: vsrl_vx_nxv32i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i8 6, i32 0 @@ -149,7 +149,7 @@ define @vsrl_vx_nxv64i8( %va, i8 signext %b) { ; CHECK-LABEL: vsrl_vx_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -161,7 +161,7 @@ define @vsrl_vx_nxv64i8_0( %va) { ; CHECK-LABEL: vsrl_vx_nxv64i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i8 6, i32 0 @@ -173,7 +173,7 @@ define @vsrl_vx_nxv1i16( %va, i16 signext %b) { ; CHECK-LABEL: vsrl_vx_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -185,7 +185,7 @@ define @vsrl_vx_nxv1i16_0( %va) { ; CHECK-LABEL: vsrl_vx_nxv1i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i16 6, i32 0 @@ -197,7 +197,7 @@ define @vsrl_vx_nxv2i16( %va, i16 signext %b) { ; CHECK-LABEL: vsrl_vx_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -209,7 +209,7 @@ define @vsrl_vx_nxv2i16_0( %va) { ; CHECK-LABEL: vsrl_vx_nxv2i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i16 6, i32 0 @@ -221,7 +221,7 @@ define @vsrl_vx_nxv4i16( %va, i16 signext %b) { ; CHECK-LABEL: vsrl_vx_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -233,7 +233,7 @@ define @vsrl_vx_nxv4i16_0( %va) { ; CHECK-LABEL: vsrl_vx_nxv4i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i16 6, i32 0 @@ -245,7 +245,7 @@ define @vsrl_vx_nxv8i16( %va, i16 signext %b) { ; CHECK-LABEL: vsrl_vx_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -257,7 +257,7 @@ define @vsrl_vx_nxv8i16_0( %va) { ; CHECK-LABEL: vsrl_vx_nxv8i16_0: ; CHECK: # 
%bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i16 6, i32 0 @@ -269,7 +269,7 @@ define @vsrl_vx_nxv16i16( %va, i16 signext %b) { ; CHECK-LABEL: vsrl_vx_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -281,7 +281,7 @@ define @vsrl_vx_nxv16i16_0( %va) { ; CHECK-LABEL: vsrl_vx_nxv16i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i16 6, i32 0 @@ -293,7 +293,7 @@ define @vsrl_vx_nxv32i16( %va, i16 signext %b) { ; CHECK-LABEL: vsrl_vx_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -305,7 +305,7 @@ define @vsrl_vx_nxv32i16_0( %va) { ; CHECK-LABEL: vsrl_vx_nxv32i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 6 ; CHECK-NEXT: ret %head = insertelement poison, i16 6, i32 0 @@ -317,7 +317,7 @@ define @vsrl_vx_nxv1i32( %va, i32 signext %b) { ; CHECK-LABEL: vsrl_vx_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -329,7 +329,7 @@ define @vsrl_vx_nxv1i32_0( %va) { ; CHECK-LABEL: vsrl_vx_nxv1i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 31 ; CHECK-NEXT: ret %head = insertelement poison, i32 31, i32 0 @@ -341,7 +341,7 @@ define @vsrl_vx_nxv2i32( %va, i32 signext %b) { ; CHECK-LABEL: vsrl_vx_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -353,7 +353,7 @@ define @vsrl_vx_nxv2i32_0( %va) { ; CHECK-LABEL: vsrl_vx_nxv2i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 31 ; CHECK-NEXT: ret %head = insertelement poison, i32 31, i32 0 @@ -365,7 +365,7 @@ define @vsrl_vx_nxv4i32( %va, i32 signext %b) { ; CHECK-LABEL: vsrl_vx_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -377,7 +377,7 @@ define @vsrl_vx_nxv4i32_0( %va) { ; CHECK-LABEL: vsrl_vx_nxv4i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 31 ; CHECK-NEXT: ret %head = insertelement poison, i32 31, i32 0 @@ -389,7 +389,7 @@ define @vsrl_vx_nxv8i32( %va, i32 signext %b) { ; CHECK-LABEL: vsrl_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, 
i32 %b, i32 0 @@ -401,7 +401,7 @@ define @vsrl_vx_nxv8i32_0( %va) { ; CHECK-LABEL: vsrl_vx_nxv8i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 31 ; CHECK-NEXT: ret %head = insertelement poison, i32 31, i32 0 @@ -413,7 +413,7 @@ define @vsrl_vx_nxv16i32( %va, i32 signext %b) { ; CHECK-LABEL: vsrl_vx_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -425,7 +425,7 @@ define @vsrl_vx_nxv16i32_0( %va) { ; CHECK-LABEL: vsrl_vx_nxv16i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 31 ; CHECK-NEXT: ret %head = insertelement poison, i32 31, i32 0 @@ -437,7 +437,7 @@ define @vsrl_vx_nxv1i64( %va, i64 %b) { ; CHECK-LABEL: vsrl_vx_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -449,7 +449,7 @@ define @vsrl_vx_nxv1i64_0( %va) { ; CHECK-LABEL: vsrl_vx_nxv1i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 31 ; CHECK-NEXT: ret %head = insertelement poison, i64 31, i32 0 @@ -462,7 +462,7 @@ ; CHECK-LABEL: vsrl_vx_nxv1i64_1: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 32, i32 0 @@ -474,7 +474,7 @@ define @vsrl_vx_nxv2i64( %va, i64 %b) { ; CHECK-LABEL: vsrl_vx_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -486,7 +486,7 @@ define @vsrl_vx_nxv2i64_0( %va) { ; CHECK-LABEL: vsrl_vx_nxv2i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 31 ; CHECK-NEXT: ret %head = insertelement poison, i64 31, i32 0 @@ -499,7 +499,7 @@ ; CHECK-LABEL: vsrl_vx_nxv2i64_1: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 32, i32 0 @@ -511,7 +511,7 @@ define @vsrl_vx_nxv4i64( %va, i64 %b) { ; CHECK-LABEL: vsrl_vx_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -523,7 +523,7 @@ define @vsrl_vx_nxv4i64_0( %va) { ; CHECK-LABEL: vsrl_vx_nxv4i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 31 ; CHECK-NEXT: ret %head = insertelement poison, i64 31, i32 0 @@ -536,7 +536,7 @@ ; CHECK-LABEL: vsrl_vx_nxv4i64_1: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: 
ret %head = insertelement poison, i64 32, i32 0 @@ -548,7 +548,7 @@ define @vsrl_vx_nxv8i64( %va, i64 %b) { ; CHECK-LABEL: vsrl_vx_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -560,7 +560,7 @@ define @vsrl_vx_nxv8i64_0( %va) { ; CHECK-LABEL: vsrl_vx_nxv8i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 31 ; CHECK-NEXT: ret %head = insertelement poison, i64 31, i32 0 @@ -573,7 +573,7 @@ ; CHECK-LABEL: vsrl_vx_nxv8i64_1: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 32 -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 32, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll @@ -10,7 +10,7 @@ ; CHECK-LABEL: vsrl_vx_nxv8i7: ; CHECK: # %bb.0: ; CHECK-NEXT: li a2, 127 -; CHECK-NEXT: vsetvli a3, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e8, m1, ta, ma ; CHECK-NEXT: vand.vx v8, v8, a2 ; CHECK-NEXT: vmv.v.x v9, a0 ; CHECK-NEXT: vand.vx v9, v9, a2 @@ -38,7 +38,7 @@ define @vsrl_vv_nxv1i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv1i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -62,7 +62,7 @@ define @vsrl_vx_nxv1i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv1i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -88,7 +88,7 @@ define @vsrl_vi_nxv1i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv1i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 4, i32 0 @@ -114,7 +114,7 @@ define @vsrl_vv_nxv2i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -138,7 +138,7 @@ define @vsrl_vx_nxv2i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -164,7 +164,7 @@ define @vsrl_vi_nxv2i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 4, i32 0 @@ -190,7 +190,7 @@ define @vsrl_vv_nxv4i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv4i8_unmasked: ; CHECK: # %bb.0: -; 
CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -214,7 +214,7 @@ define @vsrl_vx_nxv4i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -240,7 +240,7 @@ define @vsrl_vi_nxv4i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 4, i32 0 @@ -266,7 +266,7 @@ define @vsrl_vv_nxv8i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -290,7 +290,7 @@ define @vsrl_vx_nxv8i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -316,7 +316,7 @@ define @vsrl_vi_nxv8i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 4, i32 0 @@ -342,7 +342,7 @@ define @vsrl_vv_nxv16i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -366,7 +366,7 @@ define @vsrl_vx_nxv16i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -392,7 +392,7 @@ define @vsrl_vi_nxv16i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 4, i32 0 @@ -418,7 +418,7 @@ define @vsrl_vv_nxv32i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv32i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -442,7 +442,7 @@ define @vsrl_vx_nxv32i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv32i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -468,7 +468,7 @@ 
define @vsrl_vi_nxv32i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv32i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 4, i32 0 @@ -494,7 +494,7 @@ define @vsrl_vv_nxv64i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv64i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -518,7 +518,7 @@ define @vsrl_vx_nxv64i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv64i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -544,7 +544,7 @@ define @vsrl_vi_nxv64i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv64i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 4, i32 0 @@ -570,7 +570,7 @@ define @vsrl_vv_nxv1i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv1i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -594,7 +594,7 @@ define @vsrl_vx_nxv1i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv1i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -620,7 +620,7 @@ define @vsrl_vi_nxv1i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv1i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 4, i32 0 @@ -646,7 +646,7 @@ define @vsrl_vv_nxv2i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -670,7 +670,7 @@ define @vsrl_vx_nxv2i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -696,7 +696,7 @@ define @vsrl_vi_nxv2i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 4, i32 0 @@ -722,7 +722,7 @@ define @vsrl_vv_nxv4i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; 
CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -746,7 +746,7 @@ define @vsrl_vx_nxv4i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -772,7 +772,7 @@ define @vsrl_vi_nxv4i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 4, i32 0 @@ -798,7 +798,7 @@ define @vsrl_vv_nxv8i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -822,7 +822,7 @@ define @vsrl_vx_nxv8i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -848,7 +848,7 @@ define @vsrl_vi_nxv8i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 4, i32 0 @@ -874,7 +874,7 @@ define @vsrl_vv_nxv16i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -898,7 +898,7 @@ define @vsrl_vx_nxv16i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -924,7 +924,7 @@ define @vsrl_vi_nxv16i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 4, i32 0 @@ -950,7 +950,7 @@ define @vsrl_vv_nxv32i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv32i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -974,7 +974,7 @@ define @vsrl_vx_nxv32i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv32i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -1000,7 +1000,7 
@@ define @vsrl_vi_nxv32i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv32i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 4, i32 0 @@ -1026,7 +1026,7 @@ define @vsrl_vv_nxv1i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv1i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1050,7 +1050,7 @@ define @vsrl_vx_nxv1i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv1i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1076,7 +1076,7 @@ define @vsrl_vi_nxv1i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv1i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 4, i32 0 @@ -1102,7 +1102,7 @@ define @vsrl_vv_nxv2i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1126,7 +1126,7 @@ define @vsrl_vx_nxv2i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1152,7 +1152,7 @@ define @vsrl_vi_nxv2i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 4, i32 0 @@ -1178,7 +1178,7 @@ define @vsrl_vv_nxv4i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1202,7 +1202,7 @@ define @vsrl_vx_nxv4i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1228,7 +1228,7 @@ define @vsrl_vi_nxv4i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 4, i32 0 @@ -1254,7 +1254,7 @@ define @vsrl_vv_nxv8i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, 
a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1278,7 +1278,7 @@ define @vsrl_vx_nxv8i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1304,7 +1304,7 @@ define @vsrl_vi_nxv8i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 4, i32 0 @@ -1330,7 +1330,7 @@ define @vsrl_vv_nxv16i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1354,7 +1354,7 @@ define @vsrl_vx_nxv16i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vx_nxv16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsrl.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1380,7 +1380,7 @@ define @vsrl_vi_nxv16i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 4, i32 0 @@ -1406,7 +1406,7 @@ define @vsrl_vv_nxv1i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv1i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1436,13 +1436,13 @@ define @vsrl_vx_nxv1i64_unmasked( %va, i64 %b, i32 zeroext %evl) { ; RV32-LABEL: vsrl_vx_nxv1i64_unmasked: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsrl_vx_nxv1i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vsrl.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1468,7 +1468,7 @@ define @vsrl_vi_nxv1i64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv1i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 4, i32 0 @@ -1494,7 +1494,7 @@ define @vsrl_vv_nxv2i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1524,13 +1524,13 @@ define @vsrl_vx_nxv2i64_unmasked( %va, i64 %b, i32 zeroext %evl) { ; RV32-LABEL: 
vsrl_vx_nxv2i64_unmasked: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsrl_vx_nxv2i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vsrl.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1556,7 +1556,7 @@ define @vsrl_vi_nxv2i64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 4, i32 0 @@ -1582,7 +1582,7 @@ define @vsrl_vv_nxv4i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1612,13 +1612,13 @@ define @vsrl_vx_nxv4i64_unmasked( %va, i64 %b, i32 zeroext %evl) { ; RV32-LABEL: vsrl_vx_nxv4i64_unmasked: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsrl_vx_nxv4i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vsrl.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1644,7 +1644,7 @@ define @vsrl_vi_nxv4i64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 4, i32 0 @@ -1682,7 +1682,7 @@ define @vsrl_vv_nxv8i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vv_nxv8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsrl.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1712,13 +1712,13 @@ define @vsrl_vx_nxv8i64_unmasked( %va, i64 %b, i32 zeroext %evl) { ; RV32-LABEL: vsrl_vx_nxv8i64_unmasked: ; RV32: # %bb.0: -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vsrl.vx v8, v8, a0 ; RV32-NEXT: ret ; ; RV64-LABEL: vsrl_vx_nxv8i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsrl.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1744,7 +1744,7 @@ define @vsrl_vi_nxv8i64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vsrl_vi_nxv8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsrl.vi v8, v8, 4 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 4, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsse.ll b/llvm/test/CodeGen/RISCV/rvv/vsse.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsse.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsse.ll @@ -12,7 +12,7 @@ define void @intrinsic_vsse_v_nxv1i64_nxv1i64( %0, * %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv1i64_nxv1i64: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vsse64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -35,7 +35,7 @@ define void @intrinsic_vsse_mask_v_nxv1i64_nxv1i64( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define void @intrinsic_vsse_v_nxv2i64_nxv2i64( %0, * %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vsse64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -81,7 +81,7 @@ define void @intrinsic_vsse_mask_v_nxv2i64_nxv2i64( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define void @intrinsic_vsse_v_nxv4i64_nxv4i64( %0, * %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vsse64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -127,7 +127,7 @@ define void @intrinsic_vsse_mask_v_nxv4i64_nxv4i64( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -150,7 +150,7 @@ define void @intrinsic_vsse_v_nxv8i64_nxv8i64( %0, * %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vsse64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -173,7 +173,7 @@ define void @intrinsic_vsse_mask_v_nxv8i64_nxv8i64( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -196,7 +196,7 @@ define void @intrinsic_vsse_v_nxv1f64_nxv1f64( %0, * %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vsse64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -219,7 +219,7 @@ define void @intrinsic_vsse_mask_v_nxv1f64_nxv1f64( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f64_nxv1f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +242,7 @@ define void @intrinsic_vsse_v_nxv2f64_nxv2f64( %0, * %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vsse64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -265,7 +265,7 @@ define void @intrinsic_vsse_mask_v_nxv2f64_nxv2f64( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f64_nxv2f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -288,7 +288,7 @@ define void @intrinsic_vsse_v_nxv4f64_nxv4f64( %0, * %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vsse64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -311,7 +311,7 @@ define void @intrinsic_vsse_mask_v_nxv4f64_nxv4f64( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f64_nxv4f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -334,7 +334,7 @@ define void @intrinsic_vsse_v_nxv8f64_nxv8f64( %0, * %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vsse64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -357,7 +357,7 @@ define void @intrinsic_vsse_mask_v_nxv8f64_nxv8f64( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vsse64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -380,7 +380,7 @@ define void @intrinsic_vsse_v_nxv1i32_nxv1i32( %0, * %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vsse32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -403,7 +403,7 @@ define void @intrinsic_vsse_mask_v_nxv1i32_nxv1i32( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -426,7 +426,7 @@ define void @intrinsic_vsse_v_nxv2i32_nxv2i32( %0, * %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vsse32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -449,7 +449,7 @@ define void @intrinsic_vsse_mask_v_nxv2i32_nxv2i32( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -472,7 +472,7 @@ define void @intrinsic_vsse_v_nxv4i32_nxv4i32( %0, * %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, 
e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vsse32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -495,7 +495,7 @@ define void @intrinsic_vsse_mask_v_nxv4i32_nxv4i32( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -518,7 +518,7 @@ define void @intrinsic_vsse_v_nxv8i32_nxv8i32( %0, * %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma ; CHECK-NEXT: vsse32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -541,7 +541,7 @@ define void @intrinsic_vsse_mask_v_nxv8i32_nxv8i32( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma ; CHECK-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -564,7 +564,7 @@ define void @intrinsic_vsse_v_nxv16i32_nxv16i32( %0, * %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; CHECK-NEXT: vsse32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -587,7 +587,7 @@ define void @intrinsic_vsse_mask_v_nxv16i32_nxv16i32( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; CHECK-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -610,7 +610,7 @@ define void @intrinsic_vsse_v_nxv1f32_nxv1f32( %0, * %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vsse32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -633,7 +633,7 @@ define void @intrinsic_vsse_mask_v_nxv1f32_nxv1f32( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f32_nxv1f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -656,7 +656,7 @@ define void @intrinsic_vsse_v_nxv2f32_nxv2f32( %0, * %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vsse32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -679,7 +679,7 @@ define void @intrinsic_vsse_mask_v_nxv2f32_nxv2f32( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f32_nxv2f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +702,7 @@ define void @intrinsic_vsse_v_nxv4f32_nxv4f32( %0, * %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, 
mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vsse32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -725,7 +725,7 @@ define void @intrinsic_vsse_mask_v_nxv4f32_nxv4f32( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f32_nxv4f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -748,7 +748,7 @@ define void @intrinsic_vsse_v_nxv8f32_nxv8f32( %0, * %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma ; CHECK-NEXT: vsse32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -771,7 +771,7 @@ define void @intrinsic_vsse_mask_v_nxv8f32_nxv8f32( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f32_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma ; CHECK-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -794,7 +794,7 @@ define void @intrinsic_vsse_v_nxv16f32_nxv16f32( %0, * %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; CHECK-NEXT: vsse32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -817,7 +817,7 @@ define void @intrinsic_vsse_mask_v_nxv16f32_nxv16f32( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; CHECK-NEXT: vsse32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -840,7 +840,7 @@ define void @intrinsic_vsse_v_nxv1i16_nxv1i16( %0, * %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vsse16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -863,7 +863,7 @@ define void @intrinsic_vsse_mask_v_nxv1i16_nxv1i16( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -886,7 +886,7 @@ define void @intrinsic_vsse_v_nxv2i16_nxv2i16( %0, * %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vsse16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -909,7 +909,7 @@ define void @intrinsic_vsse_mask_v_nxv2i16_nxv2i16( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -932,7 +932,7 @@ define void @intrinsic_vsse_v_nxv4i16_nxv4i16( %0, * %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; 
CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vsse16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -955,7 +955,7 @@ define void @intrinsic_vsse_mask_v_nxv4i16_nxv4i16( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -978,7 +978,7 @@ define void @intrinsic_vsse_v_nxv8i16_nxv8i16( %0, * %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vsse16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1001,7 +1001,7 @@ define void @intrinsic_vsse_mask_v_nxv8i16_nxv8i16( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1024,7 +1024,7 @@ define void @intrinsic_vsse_v_nxv16i16_nxv16i16( %0, * %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vsse16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1047,7 +1047,7 @@ define void @intrinsic_vsse_mask_v_nxv16i16_nxv16i16( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1070,7 +1070,7 @@ define void @intrinsic_vsse_v_nxv32i16_nxv32i16( %0, * %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, ma ; CHECK-NEXT: vsse16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1093,7 +1093,7 @@ define void @intrinsic_vsse_mask_v_nxv32i16_nxv32i16( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, ma ; CHECK-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1116,7 +1116,7 @@ define void @intrinsic_vsse_v_nxv1f16_nxv1f16( %0, * %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vsse16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1139,7 +1139,7 @@ define void @intrinsic_vsse_mask_v_nxv1f16_nxv1f16( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f16_nxv1f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1162,7 +1162,7 @@ define void @intrinsic_vsse_v_nxv2f16_nxv2f16( %0, * %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, 
mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vsse16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1185,7 +1185,7 @@ define void @intrinsic_vsse_mask_v_nxv2f16_nxv2f16( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f16_nxv2f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1208,7 +1208,7 @@ define void @intrinsic_vsse_v_nxv4f16_nxv4f16( %0, * %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vsse16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1231,7 +1231,7 @@ define void @intrinsic_vsse_mask_v_nxv4f16_nxv4f16( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f16_nxv4f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1254,7 +1254,7 @@ define void @intrinsic_vsse_v_nxv8f16_nxv8f16( %0, * %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vsse16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1277,7 +1277,7 @@ define void @intrinsic_vsse_mask_v_nxv8f16_nxv8f16( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f16_nxv8f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1300,7 +1300,7 @@ define void @intrinsic_vsse_v_nxv16f16_nxv16f16( %0, * %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vsse16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1323,7 +1323,7 @@ define void @intrinsic_vsse_mask_v_nxv16f16_nxv16f16( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16f16_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1346,7 +1346,7 @@ define void @intrinsic_vsse_v_nxv32f16_nxv32f16( %0, * %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, ma ; CHECK-NEXT: vsse16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1369,7 +1369,7 @@ define void @intrinsic_vsse_mask_v_nxv32f16_nxv32f16( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, ma ; CHECK-NEXT: vsse16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1392,7 +1392,7 @@ define void @intrinsic_vsse_v_nxv1i8_nxv1i8( %0, * %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vsse8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1415,7 +1415,7 @@ define void @intrinsic_vsse_mask_v_nxv1i8_nxv1i8( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vsse8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1438,7 +1438,7 @@ define void @intrinsic_vsse_v_nxv2i8_nxv2i8( %0, * %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vsse8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1461,7 +1461,7 @@ define void @intrinsic_vsse_mask_v_nxv2i8_nxv2i8( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vsse8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1484,7 +1484,7 @@ define void @intrinsic_vsse_v_nxv4i8_nxv4i8( %0, * %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vsse8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1507,7 +1507,7 @@ define void @intrinsic_vsse_mask_v_nxv4i8_nxv4i8( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vsse8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1530,7 +1530,7 @@ define void @intrinsic_vsse_v_nxv8i8_nxv8i8( %0, * %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vsse8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1553,7 +1553,7 @@ define void @intrinsic_vsse_mask_v_nxv8i8_nxv8i8( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vsse8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1576,7 +1576,7 @@ define void @intrinsic_vsse_v_nxv16i8_nxv16i8( %0, * %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vsse8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1599,7 +1599,7 @@ define void @intrinsic_vsse_mask_v_nxv16i8_nxv16i8( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vsse8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1622,7 +1622,7 @@ define void @intrinsic_vsse_v_nxv32i8_nxv32i8( %0, * %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu +; CHECK-NEXT: vsetvli 
zero, a2, e8, m4, ta, ma ; CHECK-NEXT: vsse8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1645,7 +1645,7 @@ define void @intrinsic_vsse_mask_v_nxv32i8_nxv32i8( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma ; CHECK-NEXT: vsse8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1668,7 +1668,7 @@ define void @intrinsic_vsse_v_nxv64i8_nxv64i8( %0, * %1, iXLen %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsse_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma ; CHECK-NEXT: vsse8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1691,7 +1691,7 @@ define void @intrinsic_vsse_mask_v_nxv64i8_nxv64i8( %0, * %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma ; CHECK-NEXT: vsse8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsseg-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsseg-rv32.ll @@ -10,7 +10,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -23,7 +23,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -39,7 +39,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsseg2e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -52,7 +52,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -69,7 +69,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsseg3e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -83,7 +83,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -101,7 +101,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsseg4e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -116,7 +116,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, 
e8, mf8, ta, ma ; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -135,7 +135,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsseg5e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsseg5e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -171,7 +171,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsseg6e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -188,7 +188,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -209,7 +209,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsseg7e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -227,7 +227,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -249,7 +249,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsseg8e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -268,7 +268,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsseg8e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -284,7 +284,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsseg2e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -297,7 +297,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -314,7 +314,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsseg3e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -328,7 +328,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -346,7 +346,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsseg4e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -361,7 +361,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -377,7 +377,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg2e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -390,7 +390,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -407,7 +407,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg3e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -421,7 +421,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -439,7 +439,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg4e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -454,7 +454,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -473,7 +473,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg5e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -489,7 +489,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg5e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -509,7 +509,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg6e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -526,7 +526,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg6e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -547,7 +547,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg7e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -565,7 +565,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: 
vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg7e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -587,7 +587,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg8e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -606,7 +606,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg8e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -622,7 +622,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -635,7 +635,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -652,7 +652,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg3e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -666,7 +666,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -684,7 +684,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -699,7 +699,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -718,7 +718,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg5e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -734,7 +734,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -754,7 +754,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg6e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -771,7 +771,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ 
-792,7 +792,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg7e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -810,7 +810,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -832,7 +832,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg8e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -851,7 +851,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -867,7 +867,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg2e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -880,7 +880,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -897,7 +897,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg3e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -911,7 +911,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -929,7 +929,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg4e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -944,7 +944,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -963,7 +963,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg5e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -979,7 +979,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg5e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -999,7 +999,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, 
ta, ma ; CHECK-NEXT: vsseg6e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1016,7 +1016,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg6e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1037,7 +1037,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg7e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1055,7 +1055,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg7e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1077,7 +1077,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg8e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1096,7 +1096,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg8e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1112,7 +1112,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1125,7 +1125,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsseg3e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1156,7 +1156,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1174,7 +1174,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1205,7 +1205,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsseg2e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1218,7 +1218,7 @@ ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1235,7 +1235,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsseg3e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1249,7 +1249,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1267,7 +1267,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsseg4e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1282,7 +1282,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1301,7 +1301,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsseg5e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1317,7 +1317,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsseg5e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1337,7 +1337,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsseg6e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1354,7 +1354,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1375,7 +1375,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsseg7e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1393,7 +1393,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1415,7 +1415,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsseg8e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1434,7 +1434,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsseg8e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1450,7 +1450,7 @@ ; 
CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsseg2e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1463,7 +1463,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1479,7 +1479,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsseg2e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1492,7 +1492,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1509,7 +1509,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsseg3e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1523,7 +1523,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1541,7 +1541,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsseg4e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1556,7 +1556,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1575,7 +1575,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsseg5e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1591,7 +1591,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsseg5e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1611,7 +1611,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsseg6e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1628,7 +1628,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1649,7 +1649,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; 
CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsseg7e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1667,7 +1667,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1689,7 +1689,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsseg8e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1708,7 +1708,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsseg8e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1724,7 +1724,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1737,7 +1737,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1754,7 +1754,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg3e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1768,7 +1768,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1786,7 +1786,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1801,7 +1801,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1820,7 +1820,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg5e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1836,7 +1836,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1856,7 +1856,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg6e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1873,7 +1873,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v 
v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1894,7 +1894,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg7e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1912,7 +1912,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1934,7 +1934,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg8e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1953,7 +1953,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1969,7 +1969,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsseg2e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1982,7 +1982,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1998,7 +1998,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsseg2e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2011,7 +2011,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2028,7 +2028,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsseg3e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2042,7 +2042,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2060,7 +2060,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsseg4e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2075,7 +2075,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; 
CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2094,7 +2094,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsseg5e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2110,7 +2110,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsseg5e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2130,7 +2130,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsseg6e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2147,7 +2147,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2168,7 +2168,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsseg7e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2186,7 +2186,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2208,7 +2208,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsseg8e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2227,7 +2227,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsseg8e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2243,7 +2243,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2256,7 +2256,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2273,7 +2273,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg3e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2287,7 +2287,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2305,7 +2305,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: 
vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2320,7 +2320,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2339,7 +2339,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg5e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2355,7 +2355,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2375,7 +2375,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg6e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2392,7 +2392,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2413,7 +2413,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg7e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2431,7 +2431,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2453,7 +2453,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg8e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2472,7 +2472,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2488,7 +2488,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsseg2e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2501,7 +2501,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2518,7 +2518,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsseg3e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2532,7 +2532,7 @@ ; 
CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2550,7 +2550,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsseg4e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2565,7 +2565,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2581,7 +2581,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2594,7 +2594,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2610,7 +2610,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsseg2e64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2623,7 +2623,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2639,7 +2639,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg2e64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2652,7 +2652,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2669,7 +2669,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg3e64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2683,7 +2683,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg3e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2701,7 +2701,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg4e64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2716,7 +2716,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: 
vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg4e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2735,7 +2735,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg5e64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2751,7 +2751,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg5e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2771,7 +2771,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg6e64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2788,7 +2788,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg6e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2809,7 +2809,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg7e64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2827,7 +2827,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg7e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2849,7 +2849,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg8e64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2868,7 +2868,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg8e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2884,7 +2884,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg2e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2897,7 +2897,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2914,7 +2914,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg3e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2928,7 +2928,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t ; 
CHECK-NEXT: ret entry: @@ -2946,7 +2946,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg4e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2961,7 +2961,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2980,7 +2980,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg5e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2996,7 +2996,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg5e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3016,7 +3016,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg6e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3033,7 +3033,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg6e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3054,7 +3054,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg7e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3072,7 +3072,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg7e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3094,7 +3094,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg8e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3113,7 +3113,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg8e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3129,7 +3129,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3142,7 +3142,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3159,7 +3159,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, 
a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg3e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3173,7 +3173,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3191,7 +3191,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3206,7 +3206,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3225,7 +3225,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg5e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3241,7 +3241,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3261,7 +3261,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg6e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3278,7 +3278,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3299,7 +3299,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg7e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3317,7 +3317,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3339,7 +3339,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg8e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3358,7 +3358,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3374,7 +3374,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg2e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3387,7 +3387,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; 
CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3404,7 +3404,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg3e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3418,7 +3418,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3436,7 +3436,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg4e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3451,7 +3451,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3470,7 +3470,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg5e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3486,7 +3486,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg5e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3506,7 +3506,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg6e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3523,7 +3523,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg6e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3544,7 +3544,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg7e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3562,7 +3562,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg7e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3584,7 +3584,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg8e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3603,7 +3603,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg8e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3619,7 +3619,7 @@ ; CHECK: # %bb.0: # 
%entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3632,7 +3632,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3649,7 +3649,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsseg3e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3663,7 +3663,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3681,7 +3681,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3696,7 +3696,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3712,7 +3712,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsseg2e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3725,7 +3725,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3741,7 +3741,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsseg2e64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3754,7 +3754,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3771,7 +3771,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsseg3e64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3785,7 +3785,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsseg3e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ 
-3803,7 +3803,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsseg4e64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3818,7 +3818,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsseg4e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3834,7 +3834,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3847,7 +3847,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3864,7 +3864,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg3e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3878,7 +3878,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3896,7 +3896,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3911,7 +3911,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3930,7 +3930,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg5e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3946,7 +3946,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3966,7 +3966,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg6e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3983,7 +3983,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -4004,7 +4004,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, 
e16, m1, ta, ma ; CHECK-NEXT: vsseg7e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -4022,7 +4022,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -4044,7 +4044,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg8e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -4063,7 +4063,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -4079,7 +4079,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -4092,7 +4092,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -4109,7 +4109,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg3e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -4123,7 +4123,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -4141,7 +4141,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -4156,7 +4156,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -4175,7 +4175,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg5e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -4191,7 +4191,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -4211,7 +4211,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg6e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -4228,7 +4228,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: 
vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -4249,7 +4249,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg7e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -4267,7 +4267,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -4289,7 +4289,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg8e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -4308,7 +4308,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -4324,7 +4324,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsseg2e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -4337,7 +4337,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -4354,7 +4354,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsseg3e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -4368,7 +4368,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -4386,7 +4386,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsseg4e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -4401,7 +4401,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsseg-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsseg-rv64.ll @@ -10,7 +10,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -23,7 +23,7 
@@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -39,7 +39,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsseg2e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -52,7 +52,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -69,7 +69,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsseg3e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -83,7 +83,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -101,7 +101,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsseg4e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -116,7 +116,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -132,7 +132,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsseg2e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -145,7 +145,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -162,7 +162,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsseg3e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -176,7 +176,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -194,7 +194,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsseg4e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -209,7 +209,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; 
CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -225,7 +225,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg2e64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -238,7 +238,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -255,7 +255,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg3e64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg3e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -287,7 +287,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg4e64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -302,7 +302,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg4e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -321,7 +321,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg5e64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -337,7 +337,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg5e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -357,7 +357,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg6e64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -374,7 +374,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg6e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -395,7 +395,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg7e64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -413,7 +413,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg7e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: 
@@ -435,7 +435,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg8e64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -454,7 +454,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg8e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -470,7 +470,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg2e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -483,7 +483,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -500,7 +500,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg3e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -514,7 +514,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -532,7 +532,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg4e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -547,7 +547,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -566,7 +566,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg5e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -582,7 +582,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg5e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -602,7 +602,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg6e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -619,7 +619,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg6e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -640,7 +640,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, 
mf2, ta, ma ; CHECK-NEXT: vsseg7e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -658,7 +658,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg7e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -680,7 +680,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg8e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -699,7 +699,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg8e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -715,7 +715,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -728,7 +728,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -745,7 +745,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsseg3e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -759,7 +759,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -777,7 +777,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -792,7 +792,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -808,7 +808,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsseg2e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -821,7 +821,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -838,7 +838,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsseg3e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -852,7 +852,7 @@ ; CHECK-NEXT: # 
kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -870,7 +870,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsseg4e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -885,7 +885,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -904,7 +904,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsseg5e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -920,7 +920,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsseg5e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -940,7 +940,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsseg6e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -957,7 +957,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -978,7 +978,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsseg7e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -996,7 +996,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1018,7 +1018,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsseg8e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1037,7 +1037,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsseg8e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1053,7 +1053,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1066,7 +1066,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ 
-1083,7 +1083,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg3e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1097,7 +1097,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1115,7 +1115,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1130,7 +1130,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1149,7 +1149,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg5e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1165,7 +1165,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1185,7 +1185,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg6e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1202,7 +1202,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1223,7 +1223,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg7e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1241,7 +1241,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1263,7 +1263,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg8e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1282,7 +1282,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1298,7 +1298,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, 
a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg2e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1311,7 +1311,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1328,7 +1328,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg3e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1342,7 +1342,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1360,7 +1360,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg4e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1375,7 +1375,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1394,7 +1394,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg5e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1410,7 +1410,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg5e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1430,7 +1430,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg6e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1447,7 +1447,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg6e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1468,7 +1468,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg7e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1486,7 +1486,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg7e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1508,7 +1508,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg8e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1527,7 +1527,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: 
vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg8e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1543,7 +1543,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsseg2e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1556,7 +1556,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1573,7 +1573,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsseg3e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1587,7 +1587,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1605,7 +1605,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsseg4e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1620,7 +1620,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1639,7 +1639,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsseg5e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1655,7 +1655,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsseg5e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1675,7 +1675,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsseg6e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1692,7 +1692,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1713,7 +1713,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsseg7e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1731,7 +1731,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 
; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsseg8e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1772,7 +1772,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsseg8e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1788,7 +1788,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsseg2e64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1801,7 +1801,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1817,7 +1817,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1830,7 +1830,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg3e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1861,7 +1861,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1879,7 +1879,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1894,7 +1894,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1913,7 +1913,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg5e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1929,7 +1929,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1949,7 +1949,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: 
vsseg6e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -1966,7 +1966,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -1987,7 +1987,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg7e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2005,7 +2005,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2027,7 +2027,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg8e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2046,7 +2046,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2062,7 +2062,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsseg2e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2075,7 +2075,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2092,7 +2092,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsseg3e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2106,7 +2106,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2124,7 +2124,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsseg4e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2139,7 +2139,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2158,7 +2158,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsseg5e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2174,7 +2174,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, 
ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsseg5e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2194,7 +2194,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsseg6e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2211,7 +2211,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2232,7 +2232,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsseg7e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2250,7 +2250,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2272,7 +2272,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsseg8e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2291,7 +2291,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsseg8e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2307,7 +2307,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsseg2e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2320,7 +2320,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2337,7 +2337,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsseg3e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2351,7 +2351,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2369,7 +2369,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsseg4e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2384,7 +2384,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2403,7 +2403,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; 
CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsseg5e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2419,7 +2419,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsseg5e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2439,7 +2439,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsseg6e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2456,7 +2456,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2477,7 +2477,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsseg7e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2495,7 +2495,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2517,7 +2517,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsseg8e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2536,7 +2536,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsseg8e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2552,7 +2552,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsseg2e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2565,7 +2565,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2581,7 +2581,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsseg2e8.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2594,7 +2594,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2610,7 +2610,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0) 
; CHECK-NEXT: ret entry: @@ -2623,7 +2623,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2640,7 +2640,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg3e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2654,7 +2654,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2672,7 +2672,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2687,7 +2687,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2706,7 +2706,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg5e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2722,7 +2722,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2742,7 +2742,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg6e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2759,7 +2759,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2780,7 +2780,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg7e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2798,7 +2798,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2820,7 +2820,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg8e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2839,7 +2839,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu 
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2855,7 +2855,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsseg2e64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2868,7 +2868,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2885,7 +2885,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsseg3e64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2899,7 +2899,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsseg3e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2917,7 +2917,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsseg4e64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2932,7 +2932,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsseg4e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2948,7 +2948,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2961,7 +2961,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -2977,7 +2977,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsseg2e64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -2990,7 +2990,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3006,7 +3006,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg2e64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3019,7 +3019,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu 
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3036,7 +3036,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg3e64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3050,7 +3050,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg3e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3068,7 +3068,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg4e64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3083,7 +3083,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg4e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3102,7 +3102,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg5e64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3118,7 +3118,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg5e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3138,7 +3138,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg6e64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3155,7 +3155,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg6e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3176,7 +3176,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg7e64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3194,7 +3194,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg7e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3216,7 +3216,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg8e64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3235,7 +3235,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsseg8e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3251,7 +3251,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: 
vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg2e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3264,7 +3264,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3281,7 +3281,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg3e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3295,7 +3295,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3313,7 +3313,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg4e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3328,7 +3328,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3347,7 +3347,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg5e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3363,7 +3363,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg5e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3383,7 +3383,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg6e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3400,7 +3400,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg6e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3421,7 +3421,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg7e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3439,7 +3439,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg7e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3461,7 +3461,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg8e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3480,7 +3480,7 @@ ; CHECK-NEXT: 
vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsseg8e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3496,7 +3496,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3509,7 +3509,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3526,7 +3526,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg3e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3540,7 +3540,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3558,7 +3558,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3573,7 +3573,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3592,7 +3592,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg5e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3608,7 +3608,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3628,7 +3628,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg6e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3645,7 +3645,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3666,7 +3666,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg7e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3684,7 +3684,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, 
ma ; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3706,7 +3706,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg8e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3725,7 +3725,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3741,7 +3741,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg2e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3754,7 +3754,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3771,7 +3771,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg3e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3785,7 +3785,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3803,7 +3803,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg4e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3818,7 +3818,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3837,7 +3837,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg5e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3853,7 +3853,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg5e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3873,7 +3873,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg6e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3890,7 +3890,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg6e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3911,7 +3911,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: 
vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg7e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3929,7 +3929,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg7e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3951,7 +3951,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg8e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3970,7 +3970,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsseg8e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -3986,7 +3986,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -3999,7 +3999,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -4016,7 +4016,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsseg3e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -4030,7 +4030,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -4048,7 +4048,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -4063,7 +4063,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -4079,7 +4079,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsseg2e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -4092,7 +4092,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -4108,7 +4108,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, 
m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsseg2e64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -4121,7 +4121,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -4138,7 +4138,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsseg3e64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -4152,7 +4152,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsseg3e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -4170,7 +4170,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsseg4e64.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -4185,7 +4185,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsseg4e64.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -4201,7 +4201,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -4214,7 +4214,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -4231,7 +4231,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg3e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -4245,7 +4245,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -4263,7 +4263,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -4278,7 +4278,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -4297,7 +4297,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg5e16.v v8, 
(a0) ; CHECK-NEXT: ret entry: @@ -4313,7 +4313,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -4333,7 +4333,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg6e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -4350,7 +4350,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -4371,7 +4371,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg7e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -4389,7 +4389,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -4411,7 +4411,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg8e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -4430,7 +4430,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -4446,7 +4446,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -4459,7 +4459,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -4476,7 +4476,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg3e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -4490,7 +4490,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg3e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -4508,7 +4508,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -4523,7 +4523,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, 
ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg4e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -4542,7 +4542,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg5e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -4558,7 +4558,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg5e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -4578,7 +4578,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg6e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -4595,7 +4595,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg6e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -4616,7 +4616,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg7e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -4634,7 +4634,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg7e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -4656,7 +4656,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg8e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -4675,7 +4675,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsseg8e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -4691,7 +4691,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsseg2e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -4704,7 +4704,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -4721,7 +4721,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsseg3e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -4735,7 +4735,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsseg3e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ 
-4753,7 +4753,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsseg4e32.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -4768,7 +4768,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsseg4e32.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vssra_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vssra_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vssra_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vssra_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vssra_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vssra_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vssra_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vssra_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; 
CHECK-LABEL: intrinsic_vssra_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vssra_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vssra_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vssra_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vssra_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vssra_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vssra_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vssra_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vssra_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vssra_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vssra_vx_nxv1i8_nxv1i8( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, 
mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vssra_vx_nxv2i8_nxv2i8( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vssra_vx_nxv4i8_nxv4i8( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vssra_vx_nxv8i8_nxv8i8( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1047,7 +1047,7 @@ define @intrinsic_vssra_vx_nxv16i8_nxv16i8( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1094,7 +1094,7 @@ define @intrinsic_vssra_vx_nxv32i8_nxv32i8( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1141,7 +1141,7 @@ define @intrinsic_vssra_vx_nxv64i8_nxv64i8( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1188,7 +1188,7 @@ define @intrinsic_vssra_vx_nxv1i16_nxv1i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1235,7 +1235,7 @@ define @intrinsic_vssra_vx_nxv2i16_nxv2i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1282,7 +1282,7 @@ define @intrinsic_vssra_vx_nxv4i16_nxv4i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1329,7 +1329,7 @@ define @intrinsic_vssra_vx_nxv8i16_nxv8i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1376,7 +1376,7 @@ define @intrinsic_vssra_vx_nxv16i16_nxv16i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vssra_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1423,7 +1423,7 @@ define @intrinsic_vssra_vx_nxv32i16_nxv32i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1470,7 +1470,7 @@ define @intrinsic_vssra_vx_nxv1i32_nxv1i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1517,7 +1517,7 @@ define @intrinsic_vssra_vx_nxv2i32_nxv2i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1564,7 +1564,7 @@ define @intrinsic_vssra_vx_nxv4i32_nxv4i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1611,7 +1611,7 @@ define @intrinsic_vssra_vx_nxv8i32_nxv8i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1658,7 +1658,7 @@ define @intrinsic_vssra_vx_nxv16i32_nxv16i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1705,7 +1705,7 @@ define @intrinsic_vssra_vx_nxv1i64_nxv1i64( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1752,7 +1752,7 @@ define @intrinsic_vssra_vx_nxv2i64_nxv2i64( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1799,7 +1799,7 @@ define @intrinsic_vssra_vx_nxv4i64_nxv4i64( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1846,7 +1846,7 @@ define @intrinsic_vssra_vx_nxv8i64_nxv8i64( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ 
-1887,7 +1887,7 @@ define @intrinsic_vssra_vi_nxv1i8_nxv1i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1920,7 +1920,7 @@ define @intrinsic_vssra_vi_nxv2i8_nxv2i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1953,7 +1953,7 @@ define @intrinsic_vssra_vi_nxv4i8_nxv4i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1986,7 +1986,7 @@ define @intrinsic_vssra_vi_nxv8i8_nxv8i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2019,7 +2019,7 @@ define @intrinsic_vssra_vi_nxv16i8_nxv16i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2052,7 +2052,7 @@ define @intrinsic_vssra_vi_nxv32i8_nxv32i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2085,7 +2085,7 @@ define @intrinsic_vssra_vi_nxv64i8_nxv64i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2118,7 +2118,7 @@ define @intrinsic_vssra_vi_nxv1i16_nxv1i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2151,7 +2151,7 @@ define @intrinsic_vssra_vi_nxv2i16_nxv2i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2184,7 +2184,7 @@ define @intrinsic_vssra_vi_nxv4i16_nxv4i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2217,7 +2217,7 @@ define @intrinsic_vssra_vi_nxv8i16_nxv8i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; 
CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2250,7 +2250,7 @@ define @intrinsic_vssra_vi_nxv16i16_nxv16i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2283,7 +2283,7 @@ define @intrinsic_vssra_vi_nxv32i16_nxv32i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2316,7 +2316,7 @@ define @intrinsic_vssra_vi_nxv1i32_nxv1i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2349,7 +2349,7 @@ define @intrinsic_vssra_vi_nxv2i32_nxv2i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2382,7 +2382,7 @@ define @intrinsic_vssra_vi_nxv4i32_nxv4i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2415,7 +2415,7 @@ define @intrinsic_vssra_vi_nxv8i32_nxv8i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2448,7 +2448,7 @@ define @intrinsic_vssra_vi_nxv16i32_nxv16i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vssra_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vssra_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vssra.vv v8, 
v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vssra_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vssra_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vssra_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vssra_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vssra_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vssra_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vssra_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vssra_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vssra_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vssra_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vssra_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) 
nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vssra_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vssra_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vssra_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vssra_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vssra_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vssra_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vssra_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vssra_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vssra.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vssra_vx_nxv1i8_nxv1i8( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1095,7 +1095,7 @@ define @intrinsic_vssra_vx_nxv2i8_nxv2i8( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu 
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vssra_vx_nxv4i8_nxv4i8( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vssra_vx_nxv8i8_nxv8i8( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vssra_vx_nxv16i8_nxv16i8( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define @intrinsic_vssra_vx_nxv32i8_nxv32i8( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vssra_vx_nxv64i8_nxv64i8( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vssra_vx_nxv1i16_nxv1i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vssra_vx_nxv2i16_nxv2i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vssra_vx_nxv4i16_nxv4i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define @intrinsic_vssra_vx_nxv8i16_nxv8i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vssra_vx_nxv16i16_nxv16i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vssra_vx_nxv32i16_nxv32i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vssra_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vssra_vx_nxv1i32_nxv1i32( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vssra_vx_nxv2i32_nxv2i32( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ define @intrinsic_vssra_vx_nxv4i32_nxv4i32( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ define @intrinsic_vssra_vx_nxv8i32_nxv8i32( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vssra_vx_nxv16i32_nxv16i32( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1894,7 +1894,7 @@ define @intrinsic_vssra_vx_nxv1i64_nxv1i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1941,7 +1941,7 @@ define @intrinsic_vssra_vx_nxv2i64_nxv2i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1988,7 +1988,7 @@ define @intrinsic_vssra_vx_nxv4i64_nxv4i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2035,7 +2035,7 @@ define @intrinsic_vssra_vx_nxv8i64_nxv8i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssra_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vssra.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2076,7 +2076,7 @@ define @intrinsic_vssra_vi_nxv1i8_nxv1i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2109,7 
+2109,7 @@ define @intrinsic_vssra_vi_nxv2i8_nxv2i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2142,7 +2142,7 @@ define @intrinsic_vssra_vi_nxv4i8_nxv4i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2175,7 +2175,7 @@ define @intrinsic_vssra_vi_nxv8i8_nxv8i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2208,7 +2208,7 @@ define @intrinsic_vssra_vi_nxv16i8_nxv16i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2241,7 +2241,7 @@ define @intrinsic_vssra_vi_nxv32i8_nxv32i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2274,7 +2274,7 @@ define @intrinsic_vssra_vi_nxv64i8_nxv64i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2307,7 +2307,7 @@ define @intrinsic_vssra_vi_nxv1i16_nxv1i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2340,7 +2340,7 @@ define @intrinsic_vssra_vi_nxv2i16_nxv2i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2373,7 +2373,7 @@ define @intrinsic_vssra_vi_nxv4i16_nxv4i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2406,7 +2406,7 @@ define @intrinsic_vssra_vi_nxv8i16_nxv8i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2439,7 +2439,7 @@ define @intrinsic_vssra_vi_nxv16i16_nxv16i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; 
CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2472,7 +2472,7 @@ define @intrinsic_vssra_vi_nxv32i16_nxv32i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2505,7 +2505,7 @@ define @intrinsic_vssra_vi_nxv1i32_nxv1i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2538,7 +2538,7 @@ define @intrinsic_vssra_vi_nxv2i32_nxv2i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2571,7 +2571,7 @@ define @intrinsic_vssra_vi_nxv4i32_nxv4i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2604,7 +2604,7 @@ define @intrinsic_vssra_vi_nxv8i32_nxv8i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2637,7 +2637,7 @@ define @intrinsic_vssra_vi_nxv16i32_nxv16i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2670,7 +2670,7 @@ define @intrinsic_vssra_vi_nxv1i64_nxv1i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2703,7 +2703,7 @@ define @intrinsic_vssra_vi_nxv2i64_nxv2i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2736,7 +2736,7 @@ define @intrinsic_vssra_vi_nxv4i64_nxv4i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2769,7 +2769,7 @@ define @intrinsic_vssra_vi_nxv8i64_nxv8i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssra_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vssra.vi v8, v8, 9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vssrl_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vssrl_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vssrl_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vssrl_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vssrl_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vssrl_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vssrl_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vssrl_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vssrl_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vssrl_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vssrl_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vssrl_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vssrl_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vssrl_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vssrl_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vssrl_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vssrl_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vssrl_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vssrl_vx_nxv1i8_nxv1i8( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vssrl_vx_nxv2i8_nxv2i8( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vssrl_vx_nxv4i8_nxv4i8( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, 
mf2, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vssrl_vx_nxv8i8_nxv8i8( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1047,7 +1047,7 @@ define @intrinsic_vssrl_vx_nxv16i8_nxv16i8( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1094,7 +1094,7 @@ define @intrinsic_vssrl_vx_nxv32i8_nxv32i8( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1141,7 +1141,7 @@ define @intrinsic_vssrl_vx_nxv64i8_nxv64i8( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1188,7 +1188,7 @@ define @intrinsic_vssrl_vx_nxv1i16_nxv1i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1235,7 +1235,7 @@ define @intrinsic_vssrl_vx_nxv2i16_nxv2i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1282,7 +1282,7 @@ define @intrinsic_vssrl_vx_nxv4i16_nxv4i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1329,7 +1329,7 @@ define @intrinsic_vssrl_vx_nxv8i16_nxv8i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1376,7 +1376,7 @@ define @intrinsic_vssrl_vx_nxv16i16_nxv16i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1423,7 +1423,7 @@ define @intrinsic_vssrl_vx_nxv32i16_nxv32i16( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1470,7 +1470,7 @@ define @intrinsic_vssrl_vx_nxv1i32_nxv1i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i32_nxv1i32: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1517,7 +1517,7 @@ define @intrinsic_vssrl_vx_nxv2i32_nxv2i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1564,7 +1564,7 @@ define @intrinsic_vssrl_vx_nxv4i32_nxv4i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1611,7 +1611,7 @@ define @intrinsic_vssrl_vx_nxv8i32_nxv8i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1658,7 +1658,7 @@ define @intrinsic_vssrl_vx_nxv16i32_nxv16i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1705,7 +1705,7 @@ define @intrinsic_vssrl_vx_nxv1i64_nxv1i64( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1752,7 +1752,7 @@ define @intrinsic_vssrl_vx_nxv2i64_nxv2i64( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1799,7 +1799,7 @@ define @intrinsic_vssrl_vx_nxv4i64_nxv4i64( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1846,7 +1846,7 @@ define @intrinsic_vssrl_vx_nxv8i64_nxv8i64( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1887,7 +1887,7 @@ define @intrinsic_vssrl_vi_nxv1i8_nxv1i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1920,7 +1920,7 @@ define @intrinsic_vssrl_vi_nxv2i8_nxv2i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1953,7 +1953,7 @@ define @intrinsic_vssrl_vi_nxv4i8_nxv4i8_i8( 
%0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -1986,7 +1986,7 @@ define @intrinsic_vssrl_vi_nxv8i8_nxv8i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2019,7 +2019,7 @@ define @intrinsic_vssrl_vi_nxv16i8_nxv16i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2052,7 +2052,7 @@ define @intrinsic_vssrl_vi_nxv32i8_nxv32i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2085,7 +2085,7 @@ define @intrinsic_vssrl_vi_nxv64i8_nxv64i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2118,7 +2118,7 @@ define @intrinsic_vssrl_vi_nxv1i16_nxv1i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2151,7 +2151,7 @@ define @intrinsic_vssrl_vi_nxv2i16_nxv2i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2184,7 +2184,7 @@ define @intrinsic_vssrl_vi_nxv4i16_nxv4i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2217,7 +2217,7 @@ define @intrinsic_vssrl_vi_nxv8i16_nxv8i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2250,7 +2250,7 @@ define @intrinsic_vssrl_vi_nxv16i16_nxv16i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2283,7 +2283,7 @@ define @intrinsic_vssrl_vi_nxv32i16_nxv32i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret 
entry: @@ -2316,7 +2316,7 @@ define @intrinsic_vssrl_vi_nxv1i32_nxv1i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2349,7 +2349,7 @@ define @intrinsic_vssrl_vi_nxv2i32_nxv2i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2382,7 +2382,7 @@ define @intrinsic_vssrl_vi_nxv4i32_nxv4i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2415,7 +2415,7 @@ define @intrinsic_vssrl_vi_nxv8i32_nxv8i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2448,7 +2448,7 @@ define @intrinsic_vssrl_vi_nxv16i32_nxv16i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vssrl_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vssrl_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vssrl_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vssrl_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 
+245,7 @@ define @intrinsic_vssrl_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vssrl_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vssrl_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vssrl_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vssrl_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vssrl_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vssrl_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vssrl_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vssrl_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vssrl_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vssrl_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vssrl_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vssrl_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vssrl_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vssrl_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vssrl_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vssrl_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vssrl_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vssrl.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vssrl_vx_nxv1i8_nxv1i8( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1095,7 +1095,7 @@ define @intrinsic_vssrl_vx_nxv2i8_nxv2i8( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vssrl_vx_nxv4i8_nxv4i8( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vssrl_vx_nxv8i8_nxv8i8( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; 
CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vssrl_vx_nxv16i8_nxv16i8( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define @intrinsic_vssrl_vx_nxv32i8_nxv32i8( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vssrl_vx_nxv64i8_nxv64i8( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vssrl_vx_nxv1i16_nxv1i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vssrl_vx_nxv2i16_nxv2i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vssrl_vx_nxv4i16_nxv4i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define @intrinsic_vssrl_vx_nxv8i16_nxv8i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vssrl_vx_nxv16i16_nxv16i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vssrl_vx_nxv32i16_nxv32i16( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vssrl_vx_nxv1i32_nxv1i32( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vssrl_vx_nxv2i32_nxv2i32( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ define @intrinsic_vssrl_vx_nxv4i32_nxv4i32( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ define @intrinsic_vssrl_vx_nxv8i32_nxv8i32( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vssrl_vx_nxv16i32_nxv16i32( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1894,7 +1894,7 @@ define @intrinsic_vssrl_vx_nxv1i64_nxv1i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1941,7 +1941,7 @@ define @intrinsic_vssrl_vx_nxv2i64_nxv2i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1988,7 +1988,7 @@ define @intrinsic_vssrl_vx_nxv4i64_nxv4i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2035,7 +2035,7 @@ define @intrinsic_vssrl_vx_nxv8i64_nxv8i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vx_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vssrl.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2076,7 +2076,7 @@ define @intrinsic_vssrl_vi_nxv1i8_nxv1i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2109,7 +2109,7 @@ define @intrinsic_vssrl_vi_nxv2i8_nxv2i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2142,7 +2142,7 @@ define @intrinsic_vssrl_vi_nxv4i8_nxv4i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2175,7 +2175,7 @@ define @intrinsic_vssrl_vi_nxv8i8_nxv8i8_i8( %0, i64 %1) nounwind 
{ ; CHECK-LABEL: intrinsic_vssrl_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2208,7 +2208,7 @@ define @intrinsic_vssrl_vi_nxv16i8_nxv16i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2241,7 +2241,7 @@ define @intrinsic_vssrl_vi_nxv32i8_nxv32i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2274,7 +2274,7 @@ define @intrinsic_vssrl_vi_nxv64i8_nxv64i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2307,7 +2307,7 @@ define @intrinsic_vssrl_vi_nxv1i16_nxv1i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2340,7 +2340,7 @@ define @intrinsic_vssrl_vi_nxv2i16_nxv2i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2373,7 +2373,7 @@ define @intrinsic_vssrl_vi_nxv4i16_nxv4i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2406,7 +2406,7 @@ define @intrinsic_vssrl_vi_nxv8i16_nxv8i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2439,7 +2439,7 @@ define @intrinsic_vssrl_vi_nxv16i16_nxv16i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2472,7 +2472,7 @@ define @intrinsic_vssrl_vi_nxv32i16_nxv32i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2505,7 +2505,7 @@ define @intrinsic_vssrl_vi_nxv1i32_nxv1i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ 
-2538,7 +2538,7 @@ define @intrinsic_vssrl_vi_nxv2i32_nxv2i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2571,7 +2571,7 @@ define @intrinsic_vssrl_vi_nxv4i32_nxv4i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2604,7 +2604,7 @@ define @intrinsic_vssrl_vi_nxv8i32_nxv8i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2637,7 +2637,7 @@ define @intrinsic_vssrl_vi_nxv16i32_nxv16i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2670,7 +2670,7 @@ define @intrinsic_vssrl_vi_nxv1i64_nxv1i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2703,7 +2703,7 @@ define @intrinsic_vssrl_vi_nxv2i64_nxv2i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2736,7 +2736,7 @@ define @intrinsic_vssrl_vi_nxv4i64_nxv4i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2769,7 +2769,7 @@ define @intrinsic_vssrl_vi_nxv8i64_nxv8i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vssrl_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vssrl.vi v8, v8, 9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vssseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssseg-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssseg-rv32.ll @@ -10,7 +10,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -23,7 +23,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -39,7 +39,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; 
CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -52,7 +52,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -69,7 +69,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -83,7 +83,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -101,7 +101,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -116,7 +116,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -135,7 +135,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -171,7 +171,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -188,7 +188,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -209,7 +209,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -227,7 +227,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -249,7 +249,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ 
-268,7 +268,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -284,7 +284,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -297,7 +297,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -314,7 +314,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -328,7 +328,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -346,7 +346,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -361,7 +361,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -377,7 +377,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -390,7 +390,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -407,7 +407,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -421,7 +421,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -439,7 +439,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -454,7 +454,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 
; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -473,7 +473,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -489,7 +489,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -509,7 +509,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -526,7 +526,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -547,7 +547,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -565,7 +565,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -587,7 +587,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -606,7 +606,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -622,7 +622,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -635,7 +635,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -652,7 +652,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -666,7 +666,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, 
ma ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -684,7 +684,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -699,7 +699,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -718,7 +718,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -734,7 +734,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -754,7 +754,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -771,7 +771,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -792,7 +792,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -810,7 +810,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -832,7 +832,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -851,7 +851,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -867,7 +867,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -880,7 +880,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -897,7 +897,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v 
v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -911,7 +911,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -929,7 +929,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -944,7 +944,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -963,7 +963,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -979,7 +979,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -999,7 +999,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1016,7 +1016,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1037,7 +1037,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1055,7 +1055,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1077,7 +1077,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1096,7 +1096,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1112,7 +1112,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; 
CHECK-NEXT: ret entry: @@ -1125,7 +1125,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1156,7 +1156,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1174,7 +1174,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1205,7 +1205,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1218,7 +1218,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1235,7 +1235,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1249,7 +1249,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1267,7 +1267,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1282,7 +1282,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1301,7 +1301,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1317,7 +1317,7 @@ ; 
CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1337,7 +1337,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1354,7 +1354,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1375,7 +1375,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1393,7 +1393,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1415,7 +1415,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1434,7 +1434,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1450,7 +1450,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1463,7 +1463,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1479,7 +1479,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1492,7 +1492,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1509,7 +1509,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1523,7 +1523,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; 
CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1541,7 +1541,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1556,7 +1556,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1575,7 +1575,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1591,7 +1591,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1611,7 +1611,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1628,7 +1628,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1649,7 +1649,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1667,7 +1667,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1689,7 +1689,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1708,7 +1708,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1724,7 +1724,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1737,7 +1737,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1754,7 +1754,7 @@ ; 
CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1768,7 +1768,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1786,7 +1786,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1801,7 +1801,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1820,7 +1820,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1836,7 +1836,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1856,7 +1856,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1873,7 +1873,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1894,7 +1894,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1912,7 +1912,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1934,7 +1934,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1953,7 +1953,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1969,7 +1969,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, 
a2, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1982,7 +1982,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1998,7 +1998,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2011,7 +2011,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2028,7 +2028,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2042,7 +2042,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2060,7 +2060,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2075,7 +2075,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2094,7 +2094,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2110,7 +2110,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2130,7 +2130,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2147,7 +2147,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2168,7 +2168,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1 ; CHECK-NEXT: 
ret entry: @@ -2186,7 +2186,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2208,7 +2208,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2227,7 +2227,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2243,7 +2243,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2256,7 +2256,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2273,7 +2273,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2287,7 +2287,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2305,7 +2305,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2320,7 +2320,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2339,7 +2339,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2355,7 +2355,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2375,7 +2375,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2392,7 +2392,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, 
v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2413,7 +2413,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2431,7 +2431,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2453,7 +2453,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2472,7 +2472,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2488,7 +2488,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2501,7 +2501,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2518,7 +2518,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2532,7 +2532,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2550,7 +2550,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2565,7 +2565,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2581,7 +2581,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2594,7 +2594,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: 
vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2610,7 +2610,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2623,7 +2623,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2639,7 +2639,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2652,7 +2652,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2669,7 +2669,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg3e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2683,7 +2683,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg3e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2701,7 +2701,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg4e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2716,7 +2716,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg4e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2735,7 +2735,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg5e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2751,7 +2751,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg5e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2771,7 +2771,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg6e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2788,7 +2788,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, 
ta, ma ; CHECK-NEXT: vssseg6e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2809,7 +2809,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg7e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2827,7 +2827,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg7e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2849,7 +2849,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg8e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2868,7 +2868,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg8e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2884,7 +2884,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2897,7 +2897,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2914,7 +2914,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2928,7 +2928,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2946,7 +2946,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2961,7 +2961,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2980,7 +2980,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2996,7 +2996,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3016,7 +3016,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; 
CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3033,7 +3033,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3054,7 +3054,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3072,7 +3072,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3094,7 +3094,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3113,7 +3113,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3129,7 +3129,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3142,7 +3142,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3159,7 +3159,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3173,7 +3173,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3191,7 +3191,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3206,7 +3206,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3225,7 +3225,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: 
vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3241,7 +3241,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3261,7 +3261,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3278,7 +3278,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3299,7 +3299,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3317,7 +3317,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3339,7 +3339,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3358,7 +3358,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3374,7 +3374,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3387,7 +3387,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3404,7 +3404,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3418,7 +3418,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3436,7 +3436,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ 
-3451,7 +3451,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3470,7 +3470,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3486,7 +3486,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3506,7 +3506,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3523,7 +3523,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3544,7 +3544,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3562,7 +3562,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3584,7 +3584,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3603,7 +3603,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3619,7 +3619,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3632,7 +3632,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3649,7 +3649,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3663,7 +3663,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v 
v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3681,7 +3681,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3696,7 +3696,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3712,7 +3712,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3725,7 +3725,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3741,7 +3741,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3754,7 +3754,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3771,7 +3771,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vssseg3e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3785,7 +3785,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vssseg3e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3803,7 +3803,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vssseg4e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3818,7 +3818,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vssseg4e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3834,7 +3834,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3847,7 +3847,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed 
$v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3864,7 +3864,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3878,7 +3878,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3896,7 +3896,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3911,7 +3911,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3930,7 +3930,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3946,7 +3946,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3966,7 +3966,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3983,7 +3983,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -4004,7 +4004,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -4022,7 +4022,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -4044,7 +4044,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -4063,7 +4063,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg8e16.v 
v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -4079,7 +4079,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -4092,7 +4092,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -4109,7 +4109,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -4123,7 +4123,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -4141,7 +4141,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -4156,7 +4156,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -4175,7 +4175,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -4191,7 +4191,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -4211,7 +4211,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -4228,7 +4228,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -4249,7 +4249,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -4267,7 +4267,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -4289,7 +4289,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: 
vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -4308,7 +4308,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -4324,7 +4324,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -4337,7 +4337,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -4354,7 +4354,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -4368,7 +4368,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -4386,7 +4386,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -4401,7 +4401,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vssseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssseg-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssseg-rv64.ll @@ -10,7 +10,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -23,7 +23,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -39,7 +39,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -52,7 +52,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli 
zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -69,7 +69,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -83,7 +83,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -101,7 +101,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -116,7 +116,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -132,7 +132,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -145,7 +145,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -162,7 +162,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -176,7 +176,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -194,7 +194,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -209,7 +209,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -225,7 +225,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -238,7 +238,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, 
mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -255,7 +255,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg3e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg3e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -287,7 +287,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg4e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -302,7 +302,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg4e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -321,7 +321,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg5e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -337,7 +337,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg5e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -357,7 +357,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg6e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -374,7 +374,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg6e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -395,7 +395,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg7e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -413,7 +413,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg7e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -435,7 +435,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg8e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -454,7 +454,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg8e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -470,7 +470,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: 
def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -483,7 +483,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -500,7 +500,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -514,7 +514,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -532,7 +532,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -547,7 +547,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -566,7 +566,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -582,7 +582,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -602,7 +602,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -619,7 +619,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -640,7 +640,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -658,7 +658,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -680,7 +680,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, 
ta, ma ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -699,7 +699,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -715,7 +715,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -728,7 +728,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -745,7 +745,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -759,7 +759,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -777,7 +777,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -792,7 +792,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -808,7 +808,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -821,7 +821,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -838,7 +838,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -852,7 +852,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -870,7 +870,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: 
vssseg4e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -885,7 +885,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -904,7 +904,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -920,7 +920,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -940,7 +940,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -957,7 +957,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -978,7 +978,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -996,7 +996,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1018,7 +1018,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1037,7 +1037,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1053,7 +1053,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1066,7 +1066,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1083,7 +1083,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1097,7 +1097,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: 
vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1115,7 +1115,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1130,7 +1130,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1149,7 +1149,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1165,7 +1165,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1185,7 +1185,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1202,7 +1202,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1223,7 +1223,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1241,7 +1241,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1263,7 +1263,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1282,7 +1282,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1298,7 +1298,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1311,7 +1311,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t ; 
CHECK-NEXT: ret entry: @@ -1328,7 +1328,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1342,7 +1342,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1360,7 +1360,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1375,7 +1375,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1394,7 +1394,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1410,7 +1410,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1430,7 +1430,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1447,7 +1447,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1468,7 +1468,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1486,7 +1486,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1508,7 +1508,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1527,7 +1527,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1543,7 +1543,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: 
vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1556,7 +1556,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1573,7 +1573,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1587,7 +1587,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1605,7 +1605,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1620,7 +1620,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1639,7 +1639,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1655,7 +1655,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1675,7 +1675,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1692,7 +1692,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1713,7 +1713,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1731,7 +1731,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1772,7 +1772,7 @@ ; CHECK-NEXT: vmv1r.v 
v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1788,7 +1788,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1801,7 +1801,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1817,7 +1817,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1830,7 +1830,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1861,7 +1861,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1879,7 +1879,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1894,7 +1894,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1913,7 +1913,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1929,7 +1929,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1949,7 +1949,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -1966,7 +1966,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; 
CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -1987,7 +1987,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2005,7 +2005,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2027,7 +2027,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2046,7 +2046,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2062,7 +2062,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2075,7 +2075,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2092,7 +2092,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2106,7 +2106,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2124,7 +2124,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2139,7 +2139,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2158,7 +2158,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2174,7 +2174,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1, v0.t ; 
CHECK-NEXT: ret entry: @@ -2194,7 +2194,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2211,7 +2211,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2232,7 +2232,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2250,7 +2250,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2272,7 +2272,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2291,7 +2291,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2307,7 +2307,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2320,7 +2320,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2337,7 +2337,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2351,7 +2351,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vssseg3e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2369,7 +2369,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2384,7 +2384,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vssseg4e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2403,7 +2403,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; 
CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2419,7 +2419,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vssseg5e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2439,7 +2439,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2456,7 +2456,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vssseg6e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2477,7 +2477,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2495,7 +2495,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vssseg7e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2517,7 +2517,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2536,7 +2536,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vssseg8e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2552,7 +2552,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2565,7 +2565,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2581,7 +2581,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2594,7 +2594,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma ; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2610,7 +2610,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; 
CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2623,7 +2623,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2640,7 +2640,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2654,7 +2654,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2672,7 +2672,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2687,7 +2687,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2706,7 +2706,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2722,7 +2722,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2742,7 +2742,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2759,7 +2759,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2780,7 +2780,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2798,7 +2798,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2820,7 +2820,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2839,7 +2839,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: 
vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2855,7 +2855,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2868,7 +2868,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2885,7 +2885,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vssseg3e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2899,7 +2899,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vssseg3e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2917,7 +2917,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vssseg4e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2932,7 +2932,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vssseg4e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2948,7 +2948,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2961,7 +2961,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -2977,7 +2977,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -2990,7 +2990,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3006,7 +3006,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3019,7 +3019,7 @@ 
; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3036,7 +3036,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg3e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3050,7 +3050,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg3e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3068,7 +3068,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg4e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3083,7 +3083,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg4e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3102,7 +3102,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg5e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3118,7 +3118,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg5e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3138,7 +3138,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg6e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3155,7 +3155,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg6e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3176,7 +3176,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg7e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3194,7 +3194,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg7e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3216,7 +3216,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg8e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3235,7 +3235,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; 
CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vssseg8e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3251,7 +3251,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3264,7 +3264,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3281,7 +3281,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3295,7 +3295,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3313,7 +3313,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3328,7 +3328,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3347,7 +3347,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3363,7 +3363,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3383,7 +3383,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3400,7 +3400,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3421,7 +3421,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3439,7 +3439,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3461,7 
+3461,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3480,7 +3480,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3496,7 +3496,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3509,7 +3509,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3526,7 +3526,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3540,7 +3540,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3558,7 +3558,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3573,7 +3573,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3592,7 +3592,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3608,7 +3608,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3628,7 +3628,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3645,7 +3645,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3666,7 +3666,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; 
CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3684,7 +3684,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3706,7 +3706,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3725,7 +3725,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3741,7 +3741,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3754,7 +3754,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3771,7 +3771,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3785,7 +3785,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3803,7 +3803,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3818,7 +3818,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3837,7 +3837,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3853,7 +3853,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg5e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3873,7 +3873,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: 
vssseg6e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3890,7 +3890,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg6e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3911,7 +3911,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3929,7 +3929,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg7e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3951,7 +3951,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3970,7 +3970,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma ; CHECK-NEXT: vssseg8e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -3986,7 +3986,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -3999,7 +3999,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -4016,7 +4016,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -4030,7 +4030,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -4048,7 +4048,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -4063,7 +4063,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -4079,7 +4079,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: 
@@ -4092,7 +4092,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -4108,7 +4108,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -4121,7 +4121,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -4138,7 +4138,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vssseg3e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -4152,7 +4152,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vssseg3e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -4170,7 +4170,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vssseg4e64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -4185,7 +4185,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vssseg4e64.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -4201,7 +4201,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -4214,7 +4214,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -4231,7 +4231,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -4245,7 +4245,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -4263,7 +4263,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 
; CHECK-NEXT: ret entry: @@ -4278,7 +4278,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -4297,7 +4297,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -4313,7 +4313,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -4333,7 +4333,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -4350,7 +4350,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -4371,7 +4371,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -4389,7 +4389,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -4411,7 +4411,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -4430,7 +4430,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -4446,7 +4446,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -4459,7 +4459,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -4476,7 +4476,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -4490,7 +4490,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: 
vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg3e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -4508,7 +4508,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -4523,7 +4523,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg4e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -4542,7 +4542,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -4558,7 +4558,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg5e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -4578,7 +4578,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -4595,7 +4595,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg6e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -4616,7 +4616,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -4634,7 +4634,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg7e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -4656,7 +4656,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -4675,7 +4675,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vssseg8e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -4691,7 +4691,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vssseg2e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -4704,7 +4704,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vssseg2e32.v 
v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -4721,7 +4721,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -4735,7 +4735,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vssseg3e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -4753,7 +4753,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -4768,7 +4768,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma ; CHECK-NEXT: vssseg4e32.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vssub_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vssub_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vssub_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vssub_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vssub_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vssub_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { ; 
CHECK-LABEL: intrinsic_vssub_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vssub_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vssub_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vssub_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vssub_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vssub_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vssub_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vssub_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vssub_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vssub_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vssub_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vssub_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vssub_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vssub_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vssub_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vssub_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vssub_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1095,7 +1095,7 @@ define @intrinsic_vssub_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vssub_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vssub_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vssub_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define 
@intrinsic_vssub_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vssub_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vssub_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vssub_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vssub_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define @intrinsic_vssub_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vssub_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vssub_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vssub_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vssub_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ define @intrinsic_vssub_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ define @intrinsic_vssub_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vssub_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1898,7 +1898,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1957,7 +1957,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vssub.vv v8, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -2016,7 +2016,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vssub.vv v8, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -2075,7 +2075,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vssub.vv v8, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vssub_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vssub_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vssub_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; 
CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vssub_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vssub_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vssub_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vssub_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vssub_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vssub_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vssub_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vssub_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vssub_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vssub_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define 
@intrinsic_vssub_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vssub_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vssub_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vssub_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vssub_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vssub_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vssub_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vssub_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vssub_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1095,7 +1095,7 @@ define @intrinsic_vssub_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vssub_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv4i8_nxv4i8_i8: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vssub_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vssub_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define @intrinsic_vssub_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vssub_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vssub_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vssub_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vssub_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define @intrinsic_vssub_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vssub_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vssub_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret 
entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vssub_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vssub_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ define @intrinsic_vssub_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ define @intrinsic_vssub_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vssub_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1894,7 +1894,7 @@ define @intrinsic_vssub_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1941,7 +1941,7 @@ define @intrinsic_vssub_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1988,7 +1988,7 @@ define @intrinsic_vssub_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2035,7 +2035,7 @@ define @intrinsic_vssub_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssub_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssub-sdnode.ll @@ -9,7 +9,7 @@ define @ssub_nxv1i8_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv1i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call 
@llvm.ssub.sat.nxv1i8( %va, %b) @@ -19,7 +19,7 @@ define @ssub_nxv1i8_vx( %va, i8 %b) { ; CHECK-LABEL: ssub_nxv1i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -32,7 +32,7 @@ ; CHECK-LABEL: ssub_nxv1i8_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 1, i32 0 @@ -46,7 +46,7 @@ define @ssub_nxv2i8_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv2i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv2i8( %va, %b) @@ -56,7 +56,7 @@ define @ssub_nxv2i8_vx( %va, i8 %b) { ; CHECK-LABEL: ssub_nxv2i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -69,7 +69,7 @@ ; CHECK-LABEL: ssub_nxv2i8_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 1, i32 0 @@ -83,7 +83,7 @@ define @ssub_nxv4i8_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv4i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv4i8( %va, %b) @@ -93,7 +93,7 @@ define @ssub_nxv4i8_vx( %va, i8 %b) { ; CHECK-LABEL: ssub_nxv4i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -106,7 +106,7 @@ ; CHECK-LABEL: ssub_nxv4i8_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 1, i32 0 @@ -120,7 +120,7 @@ define @ssub_nxv8i8_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv8i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv8i8( %va, %b) @@ -130,7 +130,7 @@ define @ssub_nxv8i8_vx( %va, i8 %b) { ; CHECK-LABEL: ssub_nxv8i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -143,7 +143,7 @@ ; CHECK-LABEL: ssub_nxv8i8_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 1, i32 0 @@ -157,7 +157,7 @@ define @ssub_nxv16i8_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv16i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call 
@llvm.ssub.sat.nxv16i8( %va, %b) @@ -167,7 +167,7 @@ define @ssub_nxv16i8_vx( %va, i8 %b) { ; CHECK-LABEL: ssub_nxv16i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -180,7 +180,7 @@ ; CHECK-LABEL: ssub_nxv16i8_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 1, i32 0 @@ -194,7 +194,7 @@ define @ssub_nxv32i8_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv32i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v12 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv32i8( %va, %b) @@ -204,7 +204,7 @@ define @ssub_nxv32i8_vx( %va, i8 %b) { ; CHECK-LABEL: ssub_nxv32i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -217,7 +217,7 @@ ; CHECK-LABEL: ssub_nxv32i8_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 1, i32 0 @@ -231,7 +231,7 @@ define @ssub_nxv64i8_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv64i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v16 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv64i8( %va, %b) @@ -241,7 +241,7 @@ define @ssub_nxv64i8_vx( %va, i8 %b) { ; CHECK-LABEL: ssub_nxv64i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -254,7 +254,7 @@ ; CHECK-LABEL: ssub_nxv64i8_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 1, i32 0 @@ -268,7 +268,7 @@ define @ssub_nxv1i16_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv1i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv1i16( %va, %b) @@ -278,7 +278,7 @@ define @ssub_nxv1i16_vx( %va, i16 %b) { ; CHECK-LABEL: ssub_nxv1i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -291,7 +291,7 @@ ; CHECK-LABEL: ssub_nxv1i16_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 1, i32 0 @@ -305,7 +305,7 @@ define @ssub_nxv2i16_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv2i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; 
CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv2i16( %va, %b) @@ -315,7 +315,7 @@ define @ssub_nxv2i16_vx( %va, i16 %b) { ; CHECK-LABEL: ssub_nxv2i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -328,7 +328,7 @@ ; CHECK-LABEL: ssub_nxv2i16_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 1, i32 0 @@ -342,7 +342,7 @@ define @ssub_nxv4i16_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv4i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv4i16( %va, %b) @@ -352,7 +352,7 @@ define @ssub_nxv4i16_vx( %va, i16 %b) { ; CHECK-LABEL: ssub_nxv4i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -365,7 +365,7 @@ ; CHECK-LABEL: ssub_nxv4i16_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 1, i32 0 @@ -379,7 +379,7 @@ define @ssub_nxv8i16_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv8i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv8i16( %va, %b) @@ -389,7 +389,7 @@ define @ssub_nxv8i16_vx( %va, i16 %b) { ; CHECK-LABEL: ssub_nxv8i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -402,7 +402,7 @@ ; CHECK-LABEL: ssub_nxv8i16_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 1, i32 0 @@ -416,7 +416,7 @@ define @ssub_nxv16i16_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv16i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v12 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv16i16( %va, %b) @@ -426,7 +426,7 @@ define @ssub_nxv16i16_vx( %va, i16 %b) { ; CHECK-LABEL: ssub_nxv16i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -439,7 +439,7 @@ ; CHECK-LABEL: ssub_nxv16i16_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 1, i32 0 @@ -453,7 +453,7 @@ define @ssub_nxv32i16_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv32i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, 
m8, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v16 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv32i16( %va, %b) @@ -463,7 +463,7 @@ define @ssub_nxv32i16_vx( %va, i16 %b) { ; CHECK-LABEL: ssub_nxv32i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -476,7 +476,7 @@ ; CHECK-LABEL: ssub_nxv32i16_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 1, i32 0 @@ -490,7 +490,7 @@ define @ssub_nxv1i32_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv1i32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv1i32( %va, %b) @@ -500,7 +500,7 @@ define @ssub_nxv1i32_vx( %va, i32 %b) { ; CHECK-LABEL: ssub_nxv1i32_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -513,7 +513,7 @@ ; CHECK-LABEL: ssub_nxv1i32_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 1, i32 0 @@ -527,7 +527,7 @@ define @ssub_nxv2i32_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv2i32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv2i32( %va, %b) @@ -537,7 +537,7 @@ define @ssub_nxv2i32_vx( %va, i32 %b) { ; CHECK-LABEL: ssub_nxv2i32_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -550,7 +550,7 @@ ; CHECK-LABEL: ssub_nxv2i32_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 1, i32 0 @@ -564,7 +564,7 @@ define @ssub_nxv4i32_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv4i32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv4i32( %va, %b) @@ -574,7 +574,7 @@ define @ssub_nxv4i32_vx( %va, i32 %b) { ; CHECK-LABEL: ssub_nxv4i32_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -587,7 +587,7 @@ ; CHECK-LABEL: ssub_nxv4i32_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 1, i32 0 @@ -601,7 +601,7 @@ define @ssub_nxv8i32_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv8i32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, 
ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v12 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv8i32( %va, %b) @@ -611,7 +611,7 @@ define @ssub_nxv8i32_vx( %va, i32 %b) { ; CHECK-LABEL: ssub_nxv8i32_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -624,7 +624,7 @@ ; CHECK-LABEL: ssub_nxv8i32_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 1, i32 0 @@ -638,7 +638,7 @@ define @ssub_nxv16i32_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv16i32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v16 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv16i32( %va, %b) @@ -648,7 +648,7 @@ define @ssub_nxv16i32_vx( %va, i32 %b) { ; CHECK-LABEL: ssub_nxv16i32_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -661,7 +661,7 @@ ; CHECK-LABEL: ssub_nxv16i32_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 1, i32 0 @@ -675,7 +675,7 @@ define @ssub_nxv1i64_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv1i64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv1i64( %va, %b) @@ -690,7 +690,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vssub.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -698,7 +698,7 @@ ; ; RV64-LABEL: ssub_nxv1i64_vx: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV64-NEXT: vssub.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -711,7 +711,7 @@ ; CHECK-LABEL: ssub_nxv1i64_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 1, i32 0 @@ -725,7 +725,7 @@ define @ssub_nxv2i64_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv2i64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv2i64( %va, %b) @@ -740,7 +740,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vssub.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -748,7 +748,7 @@ ; ; RV64-LABEL: ssub_nxv2i64_vx: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a1, zero, 
e64, m2, ta, ma ; RV64-NEXT: vssub.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -761,7 +761,7 @@ ; CHECK-LABEL: ssub_nxv2i64_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 1, i32 0 @@ -775,7 +775,7 @@ define @ssub_nxv4i64_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv4i64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v12 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv4i64( %va, %b) @@ -790,7 +790,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vssub.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -798,7 +798,7 @@ ; ; RV64-LABEL: ssub_nxv4i64_vx: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV64-NEXT: vssub.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -811,7 +811,7 @@ ; CHECK-LABEL: ssub_nxv4i64_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 1, i32 0 @@ -825,7 +825,7 @@ define @ssub_nxv8i64_vv( %va, %b) { ; CHECK-LABEL: ssub_nxv8i64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vssub.vv v8, v8, v16 ; CHECK-NEXT: ret %v = call @llvm.ssub.sat.nxv8i64( %va, %b) @@ -840,7 +840,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vssub.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -848,7 +848,7 @@ ; ; RV64-LABEL: ssub_nxv8i64_vx: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vssub.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -861,7 +861,7 @@ ; CHECK-LABEL: ssub_nxv8i64_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; CHECK-NEXT: vssub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 1, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vssubu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, 
v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vssubu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vssubu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vssubu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vssubu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vssubu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vssubu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vssubu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vssubu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vssubu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vssubu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vssubu_vv_nxv32i16_nxv32i16_nxv32i16( %0, 
%1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vssubu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vssubu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vssubu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vssubu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vssubu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vssubu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vssubu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vssubu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vssubu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vssubu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1095,7 +1095,7 @@ define @intrinsic_vssubu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vssubu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vssubu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vssubu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define @intrinsic_vssubu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vssubu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vssubu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vssubu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vssubu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define @intrinsic_vssubu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; 
CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vssubu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vssubu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vssubu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vssubu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ define @intrinsic_vssubu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ define @intrinsic_vssubu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vssubu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1898,7 +1898,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1957,7 +1957,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vssubu.vv v8, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -2016,7 +2016,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vssubu.vv v8, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -2075,7 +2075,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, 
e64, m8, ta, ma ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vssubu.vv v8, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vssubu_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vssubu_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vssubu_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vssubu_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vssubu_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vssubu_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vssubu_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vssubu_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vssubu_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vssubu_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vssubu_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vssubu_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vssubu_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vssubu_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vssubu_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vssubu_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vssubu_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vssubu_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vssubu_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vssubu_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vssubu_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vssubu_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vssubu_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1095,7 +1095,7 @@ define @intrinsic_vssubu_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vssubu_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vssubu_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vssubu_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define @intrinsic_vssubu_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vssubu_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vssubu_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ 
-1424,7 +1424,7 @@ define @intrinsic_vssubu_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vssubu_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define @intrinsic_vssubu_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vssubu_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vssubu_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vssubu_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vssubu_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ define @intrinsic_vssubu_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ define @intrinsic_vssubu_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vssubu_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1894,7 +1894,7 @@ define @intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { ; 
CHECK-LABEL: intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1941,7 +1941,7 @@ define @intrinsic_vssubu_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1988,7 +1988,7 @@ define @intrinsic_vssubu_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2035,7 +2035,7 @@ define @intrinsic_vssubu_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vssubu_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-sdnode.ll @@ -9,7 +9,7 @@ define @usub_nxv1i8_vv( %va, %b) { ; CHECK-LABEL: usub_nxv1i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv1i8( %va, %b) @@ -19,7 +19,7 @@ define @usub_nxv1i8_vx( %va, i8 %b) { ; CHECK-LABEL: usub_nxv1i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -32,7 +32,7 @@ ; CHECK-LABEL: usub_nxv1i8_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 2, i32 0 @@ -46,7 +46,7 @@ define @usub_nxv2i8_vv( %va, %b) { ; CHECK-LABEL: usub_nxv2i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv2i8( %va, %b) @@ -56,7 +56,7 @@ define @usub_nxv2i8_vx( %va, i8 %b) { ; CHECK-LABEL: usub_nxv2i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -69,7 +69,7 @@ ; CHECK-LABEL: usub_nxv2i8_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 2, i32 0 @@ -83,7 +83,7 @@ define @usub_nxv4i8_vv( %va, %b) { ; CHECK-LABEL: usub_nxv4i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv4i8( %va, %b) @@ -93,7 +93,7 
@@ define @usub_nxv4i8_vx( %va, i8 %b) { ; CHECK-LABEL: usub_nxv4i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -106,7 +106,7 @@ ; CHECK-LABEL: usub_nxv4i8_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 2, i32 0 @@ -120,7 +120,7 @@ define @usub_nxv8i8_vv( %va, %b) { ; CHECK-LABEL: usub_nxv8i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv8i8( %va, %b) @@ -130,7 +130,7 @@ define @usub_nxv8i8_vx( %va, i8 %b) { ; CHECK-LABEL: usub_nxv8i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -143,7 +143,7 @@ ; CHECK-LABEL: usub_nxv8i8_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 2, i32 0 @@ -157,7 +157,7 @@ define @usub_nxv16i8_vv( %va, %b) { ; CHECK-LABEL: usub_nxv16i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv16i8( %va, %b) @@ -167,7 +167,7 @@ define @usub_nxv16i8_vx( %va, i8 %b) { ; CHECK-LABEL: usub_nxv16i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -180,7 +180,7 @@ ; CHECK-LABEL: usub_nxv16i8_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 2, i32 0 @@ -194,7 +194,7 @@ define @usub_nxv32i8_vv( %va, %b) { ; CHECK-LABEL: usub_nxv32i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v12 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv32i8( %va, %b) @@ -204,7 +204,7 @@ define @usub_nxv32i8_vx( %va, i8 %b) { ; CHECK-LABEL: usub_nxv32i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -217,7 +217,7 @@ ; CHECK-LABEL: usub_nxv32i8_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 2, i32 0 @@ -231,7 +231,7 @@ define @usub_nxv64i8_vv( %va, %b) { ; CHECK-LABEL: usub_nxv64i8_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v16 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv64i8( %va, %b) @@ -241,7 
+241,7 @@ define @usub_nxv64i8_vx( %va, i8 %b) { ; CHECK-LABEL: usub_nxv64i8_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -254,7 +254,7 @@ ; CHECK-LABEL: usub_nxv64i8_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 2, i32 0 @@ -268,7 +268,7 @@ define @usub_nxv1i16_vv( %va, %b) { ; CHECK-LABEL: usub_nxv1i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv1i16( %va, %b) @@ -278,7 +278,7 @@ define @usub_nxv1i16_vx( %va, i16 %b) { ; CHECK-LABEL: usub_nxv1i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -291,7 +291,7 @@ ; CHECK-LABEL: usub_nxv1i16_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 2, i32 0 @@ -305,7 +305,7 @@ define @usub_nxv2i16_vv( %va, %b) { ; CHECK-LABEL: usub_nxv2i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv2i16( %va, %b) @@ -315,7 +315,7 @@ define @usub_nxv2i16_vx( %va, i16 %b) { ; CHECK-LABEL: usub_nxv2i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -328,7 +328,7 @@ ; CHECK-LABEL: usub_nxv2i16_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 2, i32 0 @@ -342,7 +342,7 @@ define @usub_nxv4i16_vv( %va, %b) { ; CHECK-LABEL: usub_nxv4i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv4i16( %va, %b) @@ -352,7 +352,7 @@ define @usub_nxv4i16_vx( %va, i16 %b) { ; CHECK-LABEL: usub_nxv4i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -365,7 +365,7 @@ ; CHECK-LABEL: usub_nxv4i16_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 2, i32 0 @@ -379,7 +379,7 @@ define @usub_nxv8i16_vv( %va, %b) { ; CHECK-LABEL: usub_nxv8i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v10 ; CHECK-NEXT: ret 
%v = call @llvm.usub.sat.nxv8i16( %va, %b) @@ -389,7 +389,7 @@ define @usub_nxv8i16_vx( %va, i16 %b) { ; CHECK-LABEL: usub_nxv8i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -402,7 +402,7 @@ ; CHECK-LABEL: usub_nxv8i16_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 2, i32 0 @@ -416,7 +416,7 @@ define @usub_nxv16i16_vv( %va, %b) { ; CHECK-LABEL: usub_nxv16i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v12 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv16i16( %va, %b) @@ -426,7 +426,7 @@ define @usub_nxv16i16_vx( %va, i16 %b) { ; CHECK-LABEL: usub_nxv16i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -439,7 +439,7 @@ ; CHECK-LABEL: usub_nxv16i16_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 2, i32 0 @@ -453,7 +453,7 @@ define @usub_nxv32i16_vv( %va, %b) { ; CHECK-LABEL: usub_nxv32i16_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v16 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv32i16( %va, %b) @@ -463,7 +463,7 @@ define @usub_nxv32i16_vx( %va, i16 %b) { ; CHECK-LABEL: usub_nxv32i16_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -476,7 +476,7 @@ ; CHECK-LABEL: usub_nxv32i16_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 2, i32 0 @@ -490,7 +490,7 @@ define @usub_nxv1i32_vv( %va, %b) { ; CHECK-LABEL: usub_nxv1i32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv1i32( %va, %b) @@ -500,7 +500,7 @@ define @usub_nxv1i32_vx( %va, i32 %b) { ; CHECK-LABEL: usub_nxv1i32_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -513,7 +513,7 @@ ; CHECK-LABEL: usub_nxv1i32_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 2, i32 0 @@ -527,7 +527,7 @@ define @usub_nxv2i32_vv( %va, %b) { ; CHECK-LABEL: usub_nxv2i32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, 
e32, m1, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv2i32( %va, %b) @@ -537,7 +537,7 @@ define @usub_nxv2i32_vx( %va, i32 %b) { ; CHECK-LABEL: usub_nxv2i32_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -550,7 +550,7 @@ ; CHECK-LABEL: usub_nxv2i32_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 2, i32 0 @@ -564,7 +564,7 @@ define @usub_nxv4i32_vv( %va, %b) { ; CHECK-LABEL: usub_nxv4i32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv4i32( %va, %b) @@ -574,7 +574,7 @@ define @usub_nxv4i32_vx( %va, i32 %b) { ; CHECK-LABEL: usub_nxv4i32_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -587,7 +587,7 @@ ; CHECK-LABEL: usub_nxv4i32_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 2, i32 0 @@ -601,7 +601,7 @@ define @usub_nxv8i32_vv( %va, %b) { ; CHECK-LABEL: usub_nxv8i32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v12 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv8i32( %va, %b) @@ -611,7 +611,7 @@ define @usub_nxv8i32_vx( %va, i32 %b) { ; CHECK-LABEL: usub_nxv8i32_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -624,7 +624,7 @@ ; CHECK-LABEL: usub_nxv8i32_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 2, i32 0 @@ -638,7 +638,7 @@ define @usub_nxv16i32_vv( %va, %b) { ; CHECK-LABEL: usub_nxv16i32_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v16 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv16i32( %va, %b) @@ -648,7 +648,7 @@ define @usub_nxv16i32_vx( %va, i32 %b) { ; CHECK-LABEL: usub_nxv16i32_vx: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -661,7 +661,7 @@ ; CHECK-LABEL: usub_nxv16i32_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 2, i32 0 @@ -675,7 +675,7 @@ define @usub_nxv1i64_vv( %va, %b) { ; CHECK-LABEL: usub_nxv1i64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, 
zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v9 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv1i64( %va, %b) @@ -690,7 +690,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vssubu.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -698,7 +698,7 @@ ; ; RV64-LABEL: usub_nxv1i64_vx: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV64-NEXT: vssubu.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -711,7 +711,7 @@ ; CHECK-LABEL: usub_nxv1i64_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 2, i32 0 @@ -725,7 +725,7 @@ define @usub_nxv2i64_vv( %va, %b) { ; CHECK-LABEL: usub_nxv2i64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v10 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv2i64( %va, %b) @@ -740,7 +740,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vssubu.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -748,7 +748,7 @@ ; ; RV64-LABEL: usub_nxv2i64_vx: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV64-NEXT: vssubu.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -761,7 +761,7 @@ ; CHECK-LABEL: usub_nxv2i64_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 2, i32 0 @@ -775,7 +775,7 @@ define @usub_nxv4i64_vv( %va, %b) { ; CHECK-LABEL: usub_nxv4i64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vssubu.vv v8, v8, v12 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv4i64( %va, %b) @@ -790,7 +790,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vssubu.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -798,7 +798,7 @@ ; ; RV64-LABEL: usub_nxv4i64_vx: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV64-NEXT: vssubu.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -811,7 +811,7 @@ ; CHECK-LABEL: usub_nxv4i64_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 2, i32 0 @@ -825,7 +825,7 @@ define @usub_nxv8i64_vv( %va, %b) { ; CHECK-LABEL: usub_nxv8i64_vv: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: 
vssubu.vv v8, v8, v16 ; CHECK-NEXT: ret %v = call @llvm.usub.sat.nxv8i64( %va, %b) @@ -840,7 +840,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vssubu.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -848,7 +848,7 @@ ; ; RV64-LABEL: usub_nxv8i64_vx: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vssubu.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -861,7 +861,7 @@ ; CHECK-LABEL: usub_nxv8i64_vi: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 2 -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; CHECK-NEXT: vssubu.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 2, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll @@ -5,7 +5,7 @@ define @vsub_vv_nxv1i8( %va, %vb) { ; CHECK-LABEL: vsub_vv_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = sub %va, %vb @@ -15,7 +15,7 @@ define @vsub_vx_nxv1i8( %va, i8 signext %b) { ; CHECK-LABEL: vsub_vx_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -28,7 +28,7 @@ ; CHECK-LABEL: vsub_vx_nxv1i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 1, i32 0 @@ -41,7 +41,7 @@ define @vsub_ii_nxv1i8_1() { ; CHECK-LABEL: vsub_ii_nxv1i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vmv.v.i v8, -1 ; CHECK-NEXT: ret %heada = insertelement poison, i8 2, i32 0 @@ -55,7 +55,7 @@ define @vsub_vv_nxv2i8( %va, %vb) { ; CHECK-LABEL: vsub_vv_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = sub %va, %vb @@ -65,7 +65,7 @@ define @vsub_vx_nxv2i8( %va, i8 signext %b) { ; CHECK-LABEL: vsub_vx_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -78,7 +78,7 @@ ; CHECK-LABEL: vsub_vx_nxv2i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 1, i32 0 @@ -90,7 +90,7 @@ define @vsub_vv_nxv4i8( %va, %vb) { ; CHECK-LABEL: vsub_vv_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = sub %va, %vb @@ -100,7 +100,7 @@ define @vsub_vx_nxv4i8( %va, i8 signext %b) { ; CHECK-LABEL: vsub_vx_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, 
zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -113,7 +113,7 @@ ; CHECK-LABEL: vsub_vx_nxv4i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 1, i32 0 @@ -125,7 +125,7 @@ define @vsub_vv_nxv8i8( %va, %vb) { ; CHECK-LABEL: vsub_vv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = sub %va, %vb @@ -135,7 +135,7 @@ define @vsub_vx_nxv8i8( %va, i8 signext %b) { ; CHECK-LABEL: vsub_vx_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -148,7 +148,7 @@ ; CHECK-LABEL: vsub_vx_nxv8i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 1, i32 0 @@ -160,7 +160,7 @@ define @vsub_vv_nxv16i8( %va, %vb) { ; CHECK-LABEL: vsub_vv_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = sub %va, %vb @@ -170,7 +170,7 @@ define @vsub_vx_nxv16i8( %va, i8 signext %b) { ; CHECK-LABEL: vsub_vx_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -183,7 +183,7 @@ ; CHECK-LABEL: vsub_vx_nxv16i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 1, i32 0 @@ -195,7 +195,7 @@ define @vsub_vv_nxv32i8( %va, %vb) { ; CHECK-LABEL: vsub_vv_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = sub %va, %vb @@ -205,7 +205,7 @@ define @vsub_vx_nxv32i8( %va, i8 signext %b) { ; CHECK-LABEL: vsub_vx_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -218,7 +218,7 @@ ; CHECK-LABEL: vsub_vx_nxv32i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 1, i32 0 @@ -230,7 +230,7 @@ define @vsub_vv_nxv64i8( %va, %vb) { ; CHECK-LABEL: vsub_vv_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = sub %va, %vb @@ -240,7 +240,7 @@ define @vsub_vx_nxv64i8( %va, i8 signext %b) { ; CHECK-LABEL: vsub_vx_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vsub.vx v8, 
v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -253,7 +253,7 @@ ; CHECK-LABEL: vsub_vx_nxv64i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 1, i32 0 @@ -265,7 +265,7 @@ define @vsub_vv_nxv1i16( %va, %vb) { ; CHECK-LABEL: vsub_vv_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = sub %va, %vb @@ -275,7 +275,7 @@ define @vsub_vx_nxv1i16( %va, i16 signext %b) { ; CHECK-LABEL: vsub_vx_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -288,7 +288,7 @@ ; CHECK-LABEL: vsub_vx_nxv1i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 1, i32 0 @@ -300,7 +300,7 @@ define @vsub_vv_nxv2i16( %va, %vb) { ; CHECK-LABEL: vsub_vv_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = sub %va, %vb @@ -310,7 +310,7 @@ define @vsub_vx_nxv2i16( %va, i16 signext %b) { ; CHECK-LABEL: vsub_vx_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -323,7 +323,7 @@ ; CHECK-LABEL: vsub_vx_nxv2i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 1, i32 0 @@ -335,7 +335,7 @@ define @vsub_vv_nxv4i16( %va, %vb) { ; CHECK-LABEL: vsub_vv_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = sub %va, %vb @@ -345,7 +345,7 @@ define @vsub_vx_nxv4i16( %va, i16 signext %b) { ; CHECK-LABEL: vsub_vx_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -358,7 +358,7 @@ ; CHECK-LABEL: vsub_vx_nxv4i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 1, i32 0 @@ -370,7 +370,7 @@ define @vsub_vv_nxv8i16( %va, %vb) { ; CHECK-LABEL: vsub_vv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = sub %va, %vb @@ -380,7 +380,7 @@ define @vsub_vx_nxv8i16( %va, i16 signext %b) { ; CHECK-LABEL: vsub_vx_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement 
poison, i16 %b, i32 0 @@ -393,7 +393,7 @@ ; CHECK-LABEL: vsub_vx_nxv8i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 1, i32 0 @@ -405,7 +405,7 @@ define @vsub_vv_nxv16i16( %va, %vb) { ; CHECK-LABEL: vsub_vv_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = sub %va, %vb @@ -415,7 +415,7 @@ define @vsub_vx_nxv16i16( %va, i16 signext %b) { ; CHECK-LABEL: vsub_vx_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -428,7 +428,7 @@ ; CHECK-LABEL: vsub_vx_nxv16i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 1, i32 0 @@ -440,7 +440,7 @@ define @vsub_vv_nxv32i16( %va, %vb) { ; CHECK-LABEL: vsub_vv_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = sub %va, %vb @@ -450,7 +450,7 @@ define @vsub_vx_nxv32i16( %va, i16 signext %b) { ; CHECK-LABEL: vsub_vx_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -463,7 +463,7 @@ ; CHECK-LABEL: vsub_vx_nxv32i16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 1, i32 0 @@ -475,7 +475,7 @@ define @vsub_vv_nxv1i32( %va, %vb) { ; CHECK-LABEL: vsub_vv_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = sub %va, %vb @@ -485,7 +485,7 @@ define @vsub_vx_nxv1i32( %va, i32 signext %b) { ; CHECK-LABEL: vsub_vx_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -498,7 +498,7 @@ ; CHECK-LABEL: vsub_vx_nxv1i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 1, i32 0 @@ -510,7 +510,7 @@ define @vsub_vv_nxv2i32( %va, %vb) { ; CHECK-LABEL: vsub_vv_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = sub %va, %vb @@ -520,7 +520,7 @@ define @vsub_vx_nxv2i32( %va, i32 signext %b) { ; CHECK-LABEL: vsub_vx_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -533,7 +533,7 @@ ; 
CHECK-LABEL: vsub_vx_nxv2i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 1, i32 0 @@ -545,7 +545,7 @@ define @vsub_vv_nxv4i32( %va, %vb) { ; CHECK-LABEL: vsub_vv_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = sub %va, %vb @@ -555,7 +555,7 @@ define @vsub_vx_nxv4i32( %va, i32 signext %b) { ; CHECK-LABEL: vsub_vx_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -568,7 +568,7 @@ ; CHECK-LABEL: vsub_vx_nxv4i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 1, i32 0 @@ -580,7 +580,7 @@ define @vsub_vv_nxv8i32( %va, %vb) { ; CHECK-LABEL: vsub_vv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = sub %va, %vb @@ -590,7 +590,7 @@ define @vsub_vx_nxv8i32( %va, i32 signext %b) { ; CHECK-LABEL: vsub_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -603,7 +603,7 @@ ; CHECK-LABEL: vsub_vx_nxv8i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 1, i32 0 @@ -615,7 +615,7 @@ define @vsub_vv_nxv16i32( %va, %vb) { ; CHECK-LABEL: vsub_vv_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = sub %va, %vb @@ -625,7 +625,7 @@ define @vsub_vx_nxv16i32( %va, i32 signext %b) { ; CHECK-LABEL: vsub_vx_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -638,7 +638,7 @@ ; CHECK-LABEL: vsub_vx_nxv16i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 1, i32 0 @@ -650,7 +650,7 @@ define @vsub_vv_nxv1i64( %va, %vb) { ; CHECK-LABEL: vsub_vv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = sub %va, %vb @@ -665,7 +665,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsub.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -673,7 +673,7 @@ ; ; RV64-LABEL: vsub_vx_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli 
a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV64-NEXT: vsub.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -686,7 +686,7 @@ ; CHECK-LABEL: vsub_vx_nxv1i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 1, i32 0 @@ -698,7 +698,7 @@ define @vsub_vv_nxv2i64( %va, %vb) { ; CHECK-LABEL: vsub_vv_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = sub %va, %vb @@ -713,7 +713,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsub.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -721,7 +721,7 @@ ; ; RV64-LABEL: vsub_vx_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV64-NEXT: vsub.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -734,7 +734,7 @@ ; CHECK-LABEL: vsub_vx_nxv2i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 1, i32 0 @@ -746,7 +746,7 @@ define @vsub_vv_nxv4i64( %va, %vb) { ; CHECK-LABEL: vsub_vv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = sub %va, %vb @@ -761,7 +761,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsub.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -769,7 +769,7 @@ ; ; RV64-LABEL: vsub_vx_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV64-NEXT: vsub.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -782,7 +782,7 @@ ; CHECK-LABEL: vsub_vx_nxv4i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 1, i32 0 @@ -794,7 +794,7 @@ define @vsub_vv_nxv8i64( %va, %vb) { ; CHECK-LABEL: vsub_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = sub %va, %vb @@ -809,7 +809,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsub.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -817,7 +817,7 @@ ; ; RV64-LABEL: vsub_vx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vsub.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -830,7 +830,7 @@ ; 
CHECK-LABEL: vsub_vx_nxv8i64_0: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 1 -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 1, i32 0 @@ -846,7 +846,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v8, (a0), zero ; RV32-NEXT: sw a3, 12(sp) ; RV32-NEXT: sw a2, 8(sp) @@ -858,7 +858,7 @@ ; RV64-LABEL: vsub_xx_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: sub a0, a0, a1 -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vmv.v.x v8, a0 ; RV64-NEXT: ret %head1 = insertelement poison, i64 %a, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll @@ -33,7 +33,7 @@ define @vsub_vv_nxv1i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv1i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -57,7 +57,7 @@ define @vsub_vx_nxv1i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv1i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -83,7 +83,7 @@ define @vsub_vv_nxv2i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -107,7 +107,7 @@ define @vsub_vx_nxv2i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -133,7 +133,7 @@ define @vsub_vv_nxv4i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -157,7 +157,7 @@ define @vsub_vx_nxv4i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -183,7 +183,7 @@ define @vsub_vv_nxv5i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv5i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -207,7 +207,7 @@ define @vsub_vx_nxv5i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv5i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu 
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -233,7 +233,7 @@ define @vsub_vv_nxv8i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -257,7 +257,7 @@ define @vsub_vx_nxv8i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -283,7 +283,7 @@ define @vsub_vv_nxv16i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -307,7 +307,7 @@ define @vsub_vx_nxv16i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -333,7 +333,7 @@ define @vsub_vv_nxv32i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv32i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -357,7 +357,7 @@ define @vsub_vx_nxv32i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv32i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -383,7 +383,7 @@ define @vsub_vv_nxv64i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv64i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -407,7 +407,7 @@ define @vsub_vx_nxv64i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv64i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -433,7 +433,7 @@ define @vsub_vv_nxv1i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv1i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -457,7 +457,7 @@ define @vsub_vx_nxv1i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv1i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -483,7 +483,7 @@ define 
@vsub_vv_nxv2i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -507,7 +507,7 @@ define @vsub_vx_nxv2i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -533,7 +533,7 @@ define @vsub_vv_nxv4i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -557,7 +557,7 @@ define @vsub_vx_nxv4i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -583,7 +583,7 @@ define @vsub_vv_nxv8i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -607,7 +607,7 @@ define @vsub_vx_nxv8i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -633,7 +633,7 @@ define @vsub_vv_nxv16i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -657,7 +657,7 @@ define @vsub_vx_nxv16i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -683,7 +683,7 @@ define @vsub_vv_nxv32i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv32i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -707,7 +707,7 @@ define @vsub_vx_nxv32i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv32i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -733,7 +733,7 @@ define @vsub_vv_nxv1i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv1i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, 
a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -757,7 +757,7 @@ define @vsub_vx_nxv1i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv1i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -783,7 +783,7 @@ define @vsub_vv_nxv2i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -807,7 +807,7 @@ define @vsub_vx_nxv2i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -833,7 +833,7 @@ define @vsub_vv_nxv4i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -857,7 +857,7 @@ define @vsub_vx_nxv4i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -883,7 +883,7 @@ define @vsub_vv_nxv8i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -907,7 +907,7 @@ define @vsub_vx_nxv8i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -933,7 +933,7 @@ define @vsub_vv_nxv16i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -957,7 +957,7 @@ define @vsub_vx_nxv16i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vx_nxv16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -983,7 +983,7 @@ define @vsub_vv_nxv1i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv1i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement 
poison, i1 true, i32 0 @@ -1000,7 +1000,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v9, v0.t @@ -1026,16 +1026,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vsub.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_nxv1i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vsub.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1061,7 +1061,7 @@ define @vsub_vv_nxv2i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1078,7 +1078,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v10, v0.t @@ -1104,16 +1104,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vsub.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_nxv2i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vsub.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1139,7 +1139,7 @@ define @vsub_vv_nxv4i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1156,7 +1156,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v12, v0.t @@ -1182,16 +1182,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vsub.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_nxv4i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, 
e64, m4, ta, ma ; RV64-NEXT: vsub.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -1217,7 +1217,7 @@ define @vsub_vv_nxv8i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vsub_vv_nxv8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1234,7 +1234,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vsub.vv v8, v8, v16, v0.t @@ -1260,16 +1260,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vsub.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vsub_vx_nxv8i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsub.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub.ll b/llvm/test/CodeGen/RISCV/rvv/vsub.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsub.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsub.ll @@ -12,7 +12,7 @@ define @intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define @intrinsic_vsub_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vsub_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -150,7 +150,7 @@ define @intrinsic_vsub_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -196,7 +196,7 @@ define @intrinsic_vsub_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -242,7 +242,7 @@ define @intrinsic_vsub_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, 
e8, m4, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -288,7 +288,7 @@ define @intrinsic_vsub_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -335,7 +335,7 @@ define @intrinsic_vsub_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -381,7 +381,7 @@ define @intrinsic_vsub_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -427,7 +427,7 @@ define @intrinsic_vsub_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -473,7 +473,7 @@ define @intrinsic_vsub_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -519,7 +519,7 @@ define @intrinsic_vsub_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -565,7 +565,7 @@ define @intrinsic_vsub_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -612,7 +612,7 @@ define @intrinsic_vsub_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -658,7 +658,7 @@ define @intrinsic_vsub_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -704,7 +704,7 @@ define @intrinsic_vsub_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -750,7 +750,7 @@ define 
@intrinsic_vsub_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -796,7 +796,7 @@ define @intrinsic_vsub_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -843,7 +843,7 @@ define @intrinsic_vsub_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -889,7 +889,7 @@ define @intrinsic_vsub_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -935,7 +935,7 @@ define @intrinsic_vsub_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -981,7 +981,7 @@ define @intrinsic_vsub_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vsub.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1028,7 +1028,7 @@ define @intrinsic_vsub_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1074,7 +1074,7 @@ define @intrinsic_vsub_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1120,7 +1120,7 @@ define @intrinsic_vsub_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1166,7 +1166,7 @@ define @intrinsic_vsub_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1212,7 +1212,7 @@ define @intrinsic_vsub_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1258,7 +1258,7 @@ define @intrinsic_vsub_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1304,7 +1304,7 @@ define @intrinsic_vsub_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1350,7 +1350,7 @@ define @intrinsic_vsub_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1396,7 +1396,7 @@ define @intrinsic_vsub_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1442,7 +1442,7 @@ define @intrinsic_vsub_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1488,7 +1488,7 @@ define @intrinsic_vsub_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1534,7 +1534,7 @@ define @intrinsic_vsub_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1580,7 +1580,7 @@ define @intrinsic_vsub_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1626,7 +1626,7 @@ define @intrinsic_vsub_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1672,7 +1672,7 @@ define @intrinsic_vsub_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1718,7 +1718,7 @@ 
define @intrinsic_vsub_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1764,7 +1764,7 @@ define @intrinsic_vsub_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1810,7 +1810,7 @@ define @intrinsic_vsub_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { ; CHECK-LABEL: intrinsic_vsub_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsub.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1860,7 +1860,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsub.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -1868,7 +1868,7 @@ ; ; RV64-LABEL: intrinsic_vsub_vx_nxv1i64_nxv1i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vsub.vx v8, v8, a0 ; RV64-NEXT: ret entry: @@ -1930,7 +1930,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsub.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -1938,7 +1938,7 @@ ; ; RV64-LABEL: intrinsic_vsub_vx_nxv2i64_nxv2i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vsub.vx v8, v8, a0 ; RV64-NEXT: ret entry: @@ -2000,7 +2000,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsub.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 @@ -2008,7 +2008,7 @@ ; ; RV64-LABEL: intrinsic_vsub_vx_nxv4i64_nxv4i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vsub.vx v8, v8, a0 ; RV64-NEXT: ret entry: @@ -2070,7 +2070,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsub.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -2078,7 +2078,7 @@ ; ; RV64-LABEL: intrinsic_vsub_vx_nxv8i64_nxv8i64_i64: ; RV64: # %bb.0: # %entry -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vsub.vx v8, v8, a0 ; RV64-NEXT: ret entry: @@ -2130,7 +2130,7 @@ define @intrinsic_vsub_vi_nxv1i8_nxv1i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsub_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -9 ; CHECK-NEXT: ret entry: @@ -2163,7 
+2163,7 @@ define @intrinsic_vsub_vi_nxv2i8_nxv2i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsub_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -9 ; CHECK-NEXT: ret entry: @@ -2196,7 +2196,7 @@ define @intrinsic_vsub_vi_nxv4i8_nxv4i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsub_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -9 ; CHECK-NEXT: ret entry: @@ -2229,7 +2229,7 @@ define @intrinsic_vsub_vi_nxv8i8_nxv8i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsub_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -9 ; CHECK-NEXT: ret entry: @@ -2262,7 +2262,7 @@ define @intrinsic_vsub_vi_nxv16i8_nxv16i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsub_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -9 ; CHECK-NEXT: ret entry: @@ -2295,7 +2295,7 @@ define @intrinsic_vsub_vi_nxv32i8_nxv32i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsub_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -9 ; CHECK-NEXT: ret entry: @@ -2328,7 +2328,7 @@ define @intrinsic_vsub_vi_nxv64i8_nxv64i8_i8( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsub_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2361,7 +2361,7 @@ define @intrinsic_vsub_vi_nxv1i16_nxv1i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsub_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -9 ; CHECK-NEXT: ret entry: @@ -2394,7 +2394,7 @@ define @intrinsic_vsub_vi_nxv2i16_nxv2i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsub_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -9 ; CHECK-NEXT: ret entry: @@ -2427,7 +2427,7 @@ define @intrinsic_vsub_vi_nxv4i16_nxv4i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsub_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -9 ; CHECK-NEXT: ret entry: @@ -2460,7 +2460,7 @@ define @intrinsic_vsub_vi_nxv8i16_nxv8i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsub_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -9 ; CHECK-NEXT: ret entry: @@ -2493,7 +2493,7 @@ define @intrinsic_vsub_vi_nxv16i16_nxv16i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsub_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; 
CHECK-NEXT: vadd.vi v8, v8, -9 ; CHECK-NEXT: ret entry: @@ -2526,7 +2526,7 @@ define @intrinsic_vsub_vi_nxv32i16_nxv32i16_i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsub_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -9 ; CHECK-NEXT: ret entry: @@ -2559,7 +2559,7 @@ define @intrinsic_vsub_vi_nxv1i32_nxv1i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsub_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -9 ; CHECK-NEXT: ret entry: @@ -2592,7 +2592,7 @@ define @intrinsic_vsub_vi_nxv2i32_nxv2i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsub_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -9 ; CHECK-NEXT: ret entry: @@ -2625,7 +2625,7 @@ define @intrinsic_vsub_vi_nxv4i32_nxv4i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsub_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -9 ; CHECK-NEXT: ret entry: @@ -2658,7 +2658,7 @@ define @intrinsic_vsub_vi_nxv8i32_nxv8i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsub_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -9 ; CHECK-NEXT: ret entry: @@ -2691,7 +2691,7 @@ define @intrinsic_vsub_vi_nxv16i32_nxv16i32_i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsub_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -9 ; CHECK-NEXT: ret entry: @@ -2724,7 +2724,7 @@ define @intrinsic_vsub_vi_nxv1i64_nxv1i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsub_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -9 ; CHECK-NEXT: ret entry: @@ -2757,7 +2757,7 @@ define @intrinsic_vsub_vi_nxv2i64_nxv2i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsub_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -9 ; CHECK-NEXT: ret entry: @@ -2790,7 +2790,7 @@ define @intrinsic_vsub_vi_nxv4i64_nxv4i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsub_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -9 ; CHECK-NEXT: ret entry: @@ -2823,7 +2823,7 @@ define @intrinsic_vsub_vi_nxv8i64_nxv8i64_i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vsub_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vadd.vi v8, v8, -9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll @@ -13,7 +13,7 @@ define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -36,7 +36,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -59,7 +59,7 @@ define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -82,7 +82,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -105,7 +105,7 @@ define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -128,7 +128,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -174,7 +174,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -197,7 +197,7 @@ define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -220,7 +220,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, 
ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -243,7 +243,7 @@ define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -266,7 +266,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -289,7 +289,7 @@ define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -312,7 +312,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -335,7 +335,7 @@ define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -358,7 +358,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -381,7 +381,7 @@ define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -404,7 +404,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -427,7 +427,7 @@ define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -450,7 +450,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { ; 
CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -473,7 +473,7 @@ define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -496,7 +496,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -519,7 +519,7 @@ define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -542,7 +542,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -565,7 +565,7 @@ define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -588,7 +588,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -611,7 +611,7 @@ define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -634,7 +634,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -657,7 +657,7 @@ define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v12 ; CHECK-NEXT: ret 
entry: @@ -680,7 +680,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -703,7 +703,7 @@ define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -726,7 +726,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -749,7 +749,7 @@ define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -772,7 +772,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -795,7 +795,7 @@ define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -818,7 +818,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -841,7 +841,7 @@ define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -864,7 +864,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -887,7 +887,7 @@ define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -910,7 +910,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -933,7 +933,7 @@ define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -956,7 +956,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -979,7 +979,7 @@ define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -1002,7 +1002,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1025,7 +1025,7 @@ define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -1071,7 +1071,7 @@ define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -1094,7 +1094,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -1117,7 +1117,7 @@ define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i64( 
%0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1140,7 +1140,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1163,7 +1163,7 @@ define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -1186,7 +1186,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1209,7 +1209,7 @@ define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -1232,7 +1232,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -1255,7 +1255,7 @@ define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i64( %0, * %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -1278,7 +1278,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsuxei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxei.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxei.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsuxei.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsuxei.ll @@ -12,7 +12,7 @@ define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -35,7 +35,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -81,7 +81,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -127,7 +127,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -150,7 +150,7 @@ define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -173,7 +173,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -196,7 +196,7 @@ define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -219,7 +219,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -242,7 +242,7 @@ define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -265,7 +265,7 @@ define 
void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -288,7 +288,7 @@ define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -311,7 +311,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -334,7 +334,7 @@ define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -357,7 +357,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -380,7 +380,7 @@ define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -403,7 +403,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -426,7 +426,7 @@ define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -449,7 +449,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -472,7 +472,7 @@ define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -495,7 +495,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -518,7 +518,7 @@ define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -541,7 +541,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -564,7 +564,7 @@ define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -587,7 +587,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -610,7 +610,7 @@ define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -633,7 +633,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -656,7 +656,7 @@ define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -679,7 +679,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -702,7 +702,7 @@ define void 
@intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -725,7 +725,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -748,7 +748,7 @@ define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -771,7 +771,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -794,7 +794,7 @@ define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -817,7 +817,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -840,7 +840,7 @@ define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -863,7 +863,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -886,7 +886,7 @@ define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -909,7 +909,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; 
CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -932,7 +932,7 @@ define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -955,7 +955,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -978,7 +978,7 @@ define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -1001,7 +1001,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1024,7 +1024,7 @@ define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -1047,7 +1047,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -1070,7 +1070,7 @@ define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -1093,7 +1093,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -1116,7 +1116,7 @@ define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1139,7 +1139,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, * 
%1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1162,7 +1162,7 @@ define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1185,7 +1185,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1208,7 +1208,7 @@ define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -1231,7 +1231,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1254,7 +1254,7 @@ define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -1277,7 +1277,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -1300,7 +1300,7 @@ define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -1323,7 +1323,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -1346,7 +1346,7 @@ define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli 
zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1369,7 +1369,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1392,7 +1392,7 @@ define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -1415,7 +1415,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1438,7 +1438,7 @@ define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -1461,7 +1461,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -1484,7 +1484,7 @@ define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i32( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -1507,7 +1507,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i32( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsuxei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -1530,7 +1530,7 @@ define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1553,7 +1553,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1576,7 +1576,7 @@ define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i16( %0, * %1, %2, iXLen %3) nounwind { ; 
CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1599,7 +1599,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1622,7 +1622,7 @@ define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1645,7 +1645,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1668,7 +1668,7 @@ define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -1691,7 +1691,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1714,7 +1714,7 @@ define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -1737,7 +1737,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -1760,7 +1760,7 @@ define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -1783,7 +1783,7 @@ define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v16, v0.t ; 
CHECK-NEXT: ret entry: @@ -1806,7 +1806,7 @@ define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1829,7 +1829,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1852,7 +1852,7 @@ define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1875,7 +1875,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1898,7 +1898,7 @@ define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1921,7 +1921,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1944,7 +1944,7 @@ define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -1967,7 +1967,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1990,7 +1990,7 @@ define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -2013,7 +2013,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -2036,7 +2036,7 @@ define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -2059,7 +2059,7 @@ define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -2082,7 +2082,7 @@ define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2105,7 +2105,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2128,7 +2128,7 @@ define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2151,7 +2151,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2174,7 +2174,7 @@ define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -2197,7 +2197,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2220,7 +2220,7 @@ define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: 
vsuxei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -2243,7 +2243,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -2266,7 +2266,7 @@ define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -2289,7 +2289,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -2312,7 +2312,7 @@ define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2335,7 +2335,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2358,7 +2358,7 @@ define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -2381,7 +2381,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2404,7 +2404,7 @@ define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -2427,7 +2427,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -2450,7 +2450,7 @@ define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: 
intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -2473,7 +2473,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -2496,7 +2496,7 @@ define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2519,7 +2519,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2542,7 +2542,7 @@ define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2565,7 +2565,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2588,7 +2588,7 @@ define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2611,7 +2611,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2634,7 +2634,7 @@ define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -2657,7 +2657,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxei16.v v8, 
(a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2680,7 +2680,7 @@ define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -2703,7 +2703,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -2726,7 +2726,7 @@ define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -2749,7 +2749,7 @@ define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -2772,7 +2772,7 @@ define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2795,7 +2795,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2818,7 +2818,7 @@ define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2841,7 +2841,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2864,7 +2864,7 @@ define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -2887,7 +2887,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: 
intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2910,7 +2910,7 @@ define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -2933,7 +2933,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -2956,7 +2956,7 @@ define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -2979,7 +2979,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -3002,7 +3002,7 @@ define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3025,7 +3025,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3048,7 +3048,7 @@ define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3071,7 +3071,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3094,7 +3094,7 @@ define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxei16.v 
v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3117,7 +3117,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3140,7 +3140,7 @@ define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i16( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -3163,7 +3163,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsuxei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -3186,7 +3186,7 @@ define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3209,7 +3209,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3232,7 +3232,7 @@ define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3255,7 +3255,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3278,7 +3278,7 @@ define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3301,7 +3301,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3324,7 +3324,7 @@ define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3347,7 +3347,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3370,7 +3370,7 @@ define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3393,7 +3393,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3416,7 +3416,7 @@ define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3439,7 +3439,7 @@ define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3462,7 +3462,7 @@ define void @intrinsic_vsuxei_v_nxv64i8_nxv64i8_nxv64i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -3485,7 +3485,7 @@ define void @intrinsic_vsuxei_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -3508,7 +3508,7 @@ define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3531,7 +3531,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3554,7 +3554,7 @@ define void 
@intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3577,7 +3577,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3600,7 +3600,7 @@ define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3623,7 +3623,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3646,7 +3646,7 @@ define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3669,7 +3669,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3692,7 +3692,7 @@ define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3715,7 +3715,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3738,7 +3738,7 @@ define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -3761,7 +3761,7 @@ define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, 
ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -3784,7 +3784,7 @@ define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3807,7 +3807,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3830,7 +3830,7 @@ define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3853,7 +3853,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3876,7 +3876,7 @@ define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3899,7 +3899,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3922,7 +3922,7 @@ define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3945,7 +3945,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3968,7 +3968,7 @@ define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -3991,7 +3991,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, * %1, %2, %3, iXLen 
%4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -4014,7 +4014,7 @@ define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4037,7 +4037,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4060,7 +4060,7 @@ define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -4083,7 +4083,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4106,7 +4106,7 @@ define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -4129,7 +4129,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -4152,7 +4152,7 @@ define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -4175,7 +4175,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -4198,7 +4198,7 @@ define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxei8.v v8, 
(a0), v9 ; CHECK-NEXT: ret entry: @@ -4221,7 +4221,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4244,7 +4244,7 @@ define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4267,7 +4267,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4290,7 +4290,7 @@ define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4313,7 +4313,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4336,7 +4336,7 @@ define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -4359,7 +4359,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4382,7 +4382,7 @@ define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -4405,7 +4405,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -4428,7 +4428,7 @@ define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i8: 
; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -4451,7 +4451,7 @@ define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -4474,7 +4474,7 @@ define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4497,7 +4497,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4520,7 +4520,7 @@ define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4543,7 +4543,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4566,7 +4566,7 @@ define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -4589,7 +4589,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4612,7 +4612,7 @@ define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -4635,7 +4635,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -4658,7 +4658,7 @@ define 
void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -4681,7 +4681,7 @@ define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -4704,7 +4704,7 @@ define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4727,7 +4727,7 @@ define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4750,7 +4750,7 @@ define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -4773,7 +4773,7 @@ define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4796,7 +4796,7 @@ define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -4819,7 +4819,7 @@ define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -4842,7 +4842,7 @@ define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i8( %0, * %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vsuxei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -4865,7 +4865,7 @@ define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i8( %0, * %1, %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu 
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vsuxei8.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv32.ll
@@ -11,7 +11,7 @@
; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
@@ -25,7 +25,7 @@
; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
@@ -42,7 +42,7 @@
; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
@@ -56,7 +56,7 @@
; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
@@ -72,7 +72,7 @@
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
@@ -85,7 +85,7 @@
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
@@ -102,7 +102,7 @@
; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10
; CHECK-NEXT: ret
entry:
@@ -116,7 +116,7 @@
; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -133,7 +133,7 @@
; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10
; CHECK-NEXT: ret
entry:
@@ -147,7 +147,7 @@
; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
@@ -164,7 +164,7 @@
; CHECK-NEXT: # kill: def $v8 killed
$v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -178,7 +178,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -195,7 +195,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -209,7 +209,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -226,7 +226,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -240,7 +240,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -257,7 +257,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -271,7 +271,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -289,7 +289,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -304,7 +304,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -322,7 +322,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -337,7 +337,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -355,7 +355,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; 
CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -370,7 +370,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -389,7 +389,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -405,7 +405,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -424,7 +424,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -440,7 +440,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -459,7 +459,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -475,7 +475,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -495,7 +495,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -512,7 +512,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -532,7 +532,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -549,7 +549,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -569,7 +569,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -586,7 +586,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; 
CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -607,7 +607,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -625,7 +625,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -646,7 +646,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -664,7 +664,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -685,7 +685,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -703,7 +703,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -725,7 +725,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -744,7 +744,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -766,7 +766,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -785,7 +785,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -807,7 +807,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -826,7 +826,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: 
vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -842,7 +842,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -855,7 +855,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -872,7 +872,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -886,7 +886,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -902,7 +902,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -915,7 +915,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -932,7 +932,7 @@ ; CHECK-NEXT: vmv2r.v v16, v8 ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -946,7 +946,7 @@ ; CHECK-NEXT: vmv2r.v v16, v8 ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -963,7 +963,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -977,7 +977,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -994,7 +994,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -1008,7 +1008,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, 
ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -1026,7 +1026,7 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -1041,7 +1041,7 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -1059,7 +1059,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -1074,7 +1074,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1092,7 +1092,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -1107,7 +1107,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -1124,7 +1124,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -1138,7 +1138,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1155,7 +1155,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -1169,7 +1169,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1186,7 +1186,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -1200,7 +1200,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: 
vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1217,7 +1217,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1231,7 +1231,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1248,7 +1248,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1262,7 +1262,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1279,7 +1279,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1293,7 +1293,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1311,7 +1311,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1326,7 +1326,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1344,7 +1344,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1359,7 +1359,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1392,7 +1392,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: 
@@ -1411,7 +1411,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1427,7 +1427,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1446,7 +1446,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1462,7 +1462,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1481,7 +1481,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1497,7 +1497,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1517,7 +1517,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1534,7 +1534,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1554,7 +1554,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1571,7 +1571,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1591,7 +1591,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1608,7 +1608,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1629,7 +1629,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, 
e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1647,7 +1647,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1668,7 +1668,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1686,7 +1686,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1707,7 +1707,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1725,7 +1725,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1747,7 +1747,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1766,7 +1766,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1788,7 +1788,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1807,7 +1807,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1829,7 +1829,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1848,7 +1848,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1865,7 +1865,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; 
CHECK-NEXT: ret entry: @@ -1879,7 +1879,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1896,7 +1896,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -1910,7 +1910,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1926,7 +1926,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -1939,7 +1939,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1956,7 +1956,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1970,7 +1970,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1987,7 +1987,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2001,7 +2001,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2018,7 +2018,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -2032,7 +2032,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2050,7 +2050,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ 
-2065,7 +2065,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2083,7 +2083,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2098,7 +2098,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2116,7 +2116,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -2131,7 +2131,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2150,7 +2150,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2166,7 +2166,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2185,7 +2185,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2201,7 +2201,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2220,7 +2220,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -2236,7 +2236,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2256,7 +2256,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2273,7 +2273,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, 
e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2293,7 +2293,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2310,7 +2310,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2330,7 +2330,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -2347,7 +2347,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2368,7 +2368,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2386,7 +2386,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2407,7 +2407,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2425,7 +2425,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2446,7 +2446,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -2464,7 +2464,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2486,7 +2486,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2505,7 +2505,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ 
-2527,7 +2527,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2546,7 +2546,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2568,7 +2568,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -2587,7 +2587,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2604,7 +2604,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -2618,7 +2618,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2635,7 +2635,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -2649,7 +2649,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2666,7 +2666,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -2680,7 +2680,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2697,7 +2697,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2711,7 +2711,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: 
vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2728,7 +2728,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2742,7 +2742,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2759,7 +2759,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2773,7 +2773,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2791,7 +2791,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2806,7 +2806,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2824,7 +2824,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2839,7 +2839,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2857,7 +2857,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2872,7 +2872,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2891,7 +2891,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2907,7 +2907,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2926,7 +2926,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: 
vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2942,7 +2942,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2961,7 +2961,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2977,7 +2977,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2997,7 +2997,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3014,7 +3014,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3034,7 +3034,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3051,7 +3051,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3071,7 +3071,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3088,7 +3088,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3109,7 +3109,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3127,7 +3127,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3148,7 +3148,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, 
a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3166,7 +3166,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3187,7 +3187,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3205,7 +3205,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3227,7 +3227,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3246,7 +3246,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3268,7 +3268,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3287,7 +3287,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3309,7 +3309,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3328,7 +3328,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3345,7 +3345,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3359,7 +3359,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3376,7 +3376,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: 
vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3390,7 +3390,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3406,7 +3406,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3419,7 +3419,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3436,7 +3436,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3450,7 +3450,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3467,7 +3467,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3481,7 +3481,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3498,7 +3498,7 @@ ; CHECK-NEXT: vmv2r.v v16, v8 ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3512,7 +3512,7 @@ ; CHECK-NEXT: vmv2r.v v16, v8 ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3530,7 +3530,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3545,7 +3545,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3563,7 +3563,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: 
vsuxseg4ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3578,7 +3578,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3596,7 +3596,7 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3611,7 +3611,7 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3627,7 +3627,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3640,7 +3640,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3657,7 +3657,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3671,7 +3671,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3687,7 +3687,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3700,7 +3700,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3717,7 +3717,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3731,7 +3731,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3748,7 +3748,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: 
ret entry: @@ -3762,7 +3762,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3779,7 +3779,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3793,7 +3793,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3811,7 +3811,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3826,7 +3826,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3844,7 +3844,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3859,7 +3859,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3877,7 +3877,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3892,7 +3892,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3911,7 +3911,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3927,7 +3927,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3946,7 +3946,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3962,7 +3962,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; 
CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3981,7 +3981,7 @@ ; CHECK-NEXT: vmv1r.v v18, v16 ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3997,7 +3997,7 @@ ; CHECK-NEXT: vmv1r.v v18, v16 ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -4017,7 +4017,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -4034,7 +4034,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4054,7 +4054,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4071,7 +4071,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4091,7 +4091,7 @@ ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -4108,7 +4108,7 @@ ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -4129,7 +4129,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -4147,7 +4147,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4168,7 +4168,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4186,7 +4186,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: 
@@ -4207,7 +4207,7 @@ ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -4225,7 +4225,7 @@ ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -4247,7 +4247,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -4266,7 +4266,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4288,7 +4288,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4307,7 +4307,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4329,7 +4329,7 @@ ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -4348,7 +4348,7 @@ ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -4365,7 +4365,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -4379,7 +4379,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -4396,7 +4396,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -4410,7 +4410,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), 
v16, v0.t ; CHECK-NEXT: ret entry: @@ -4427,7 +4427,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -4441,7 +4441,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -4458,7 +4458,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -4472,7 +4472,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4489,7 +4489,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -4503,7 +4503,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4519,7 +4519,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -4532,7 +4532,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4549,7 +4549,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4563,7 +4563,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4580,7 +4580,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4594,7 +4594,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, 
mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4611,7 +4611,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -4625,7 +4625,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4643,7 +4643,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4658,7 +4658,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4676,7 +4676,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4691,7 +4691,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4709,7 +4709,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -4724,7 +4724,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4743,7 +4743,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4759,7 +4759,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4778,7 +4778,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4794,7 +4794,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4813,7 
+4813,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -4829,7 +4829,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4849,7 +4849,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4866,7 +4866,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4886,7 +4886,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4903,7 +4903,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4923,7 +4923,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -4940,7 +4940,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4961,7 +4961,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4979,7 +4979,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5000,7 +5000,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5018,7 +5018,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5039,7 +5039,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, 
ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -5057,7 +5057,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5079,7 +5079,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5098,7 +5098,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5120,7 +5120,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5139,7 +5139,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5161,7 +5161,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -5180,7 +5180,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5197,7 +5197,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -5211,7 +5211,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5228,7 +5228,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -5242,7 +5242,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5259,7 +5259,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: 
vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -5273,7 +5273,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5290,7 +5290,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5304,7 +5304,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5321,7 +5321,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5335,7 +5335,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5352,7 +5352,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5366,7 +5366,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5384,7 +5384,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5399,7 +5399,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5417,7 +5417,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5432,7 +5432,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5450,7 +5450,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: 
vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5465,7 +5465,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5484,7 +5484,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5500,7 +5500,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5519,7 +5519,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5535,7 +5535,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5554,7 +5554,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5570,7 +5570,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5590,7 +5590,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5607,7 +5607,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5627,7 +5627,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5644,7 +5644,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5664,7 +5664,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5681,7 +5681,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: 
vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5702,7 +5702,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5720,7 +5720,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5741,7 +5741,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5759,7 +5759,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5780,7 +5780,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5798,7 +5798,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5820,7 +5820,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5839,7 +5839,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5861,7 +5861,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5880,7 +5880,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5902,7 +5902,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5921,7 +5921,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, 
a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5937,7 +5937,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -5950,7 +5950,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -5967,7 +5967,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -5981,7 +5981,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -5998,7 +5998,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -6012,7 +6012,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -6029,7 +6029,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -6043,7 +6043,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -6060,7 +6060,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -6074,7 +6074,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -6091,7 +6091,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6105,7 
+6105,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6122,7 +6122,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6136,7 +6136,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6153,7 +6153,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6167,7 +6167,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6185,7 +6185,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6200,7 +6200,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6218,7 +6218,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6233,7 +6233,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6251,7 +6251,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6266,7 +6266,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6285,7 +6285,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6301,7 +6301,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; 
CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6320,7 +6320,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6336,7 +6336,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6355,7 +6355,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6371,7 +6371,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6391,7 +6391,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6408,7 +6408,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6428,7 +6428,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6445,7 +6445,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6465,7 +6465,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6482,7 +6482,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6503,7 +6503,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6521,7 +6521,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6542,7 +6542,7 @@ ; 
CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6560,7 +6560,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6581,7 +6581,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6599,7 +6599,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6621,7 +6621,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6640,7 +6640,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6662,7 +6662,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6681,7 +6681,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6703,7 +6703,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6722,7 +6722,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6739,7 +6739,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -6753,7 +6753,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -6770,7 +6770,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: 
vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -6784,7 +6784,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -6801,7 +6801,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -6815,7 +6815,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -6832,7 +6832,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6846,7 +6846,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6863,7 +6863,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6877,7 +6877,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6894,7 +6894,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6908,7 +6908,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6926,7 +6926,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6941,7 +6941,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6959,7 +6959,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v 
v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6974,7 +6974,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6992,7 +6992,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7007,7 +7007,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7026,7 +7026,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7042,7 +7042,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7061,7 +7061,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7077,7 +7077,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7096,7 +7096,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7112,7 +7112,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7132,7 +7132,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7149,7 +7149,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7169,7 +7169,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: 
vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7186,7 +7186,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7206,7 +7206,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7223,7 +7223,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7244,7 +7244,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7262,7 +7262,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7283,7 +7283,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7301,7 +7301,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7322,7 +7322,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7340,7 +7340,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7362,7 +7362,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7381,7 +7381,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7403,7 +7403,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7422,7 +7422,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: 
vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7444,7 +7444,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7463,7 +7463,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7480,7 +7480,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -7494,7 +7494,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -7511,7 +7511,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -7525,7 +7525,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -7542,7 +7542,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -7556,7 +7556,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -7573,7 +7573,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -7587,7 +7587,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -7604,7 +7604,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, 
m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, 
m2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -7618,7 +7618,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -7635,7 +7635,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -7649,7 +7649,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -7667,7 +7667,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -7682,7 +7682,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -7700,7 +7700,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -7715,7 +7715,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -7733,7 +7733,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -7748,7 +7748,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -7765,7 +7765,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -7779,7 +7779,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -7796,7 +7796,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, 
m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, 
m4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -7810,7 +7810,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -7826,7 +7826,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -7839,7 +7839,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -7856,7 +7856,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -7870,7 +7870,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -7887,7 +7887,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -7901,7 +7901,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -7918,7 +7918,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -7932,7 +7932,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -7949,7 +7949,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -7963,7 +7963,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli 
zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -7980,7 +7980,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -7994,7 +7994,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -8011,7 +8011,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -8025,7 +8025,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -8042,7 +8042,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8056,7 +8056,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8073,7 +8073,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8087,7 +8087,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8104,7 +8104,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8118,7 +8118,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8136,7 +8136,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8151,7 +8151,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; 
CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8169,7 +8169,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8184,7 +8184,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8202,7 +8202,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8217,7 +8217,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8236,7 +8236,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8252,7 +8252,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8271,7 +8271,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8287,7 +8287,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8306,7 +8306,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8322,7 +8322,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8342,7 +8342,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8359,7 +8359,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8379,7 +8379,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, 
v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8396,7 +8396,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8416,7 +8416,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8433,7 +8433,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8454,7 +8454,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8472,7 +8472,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8493,7 +8493,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8511,7 +8511,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8532,7 +8532,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8550,7 +8550,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8572,7 +8572,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8591,7 +8591,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8613,7 +8613,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: 
vsuxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8632,7 +8632,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8654,7 +8654,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8673,7 +8673,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8690,7 +8690,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -8704,7 +8704,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -8721,7 +8721,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -8735,7 +8735,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -8752,7 +8752,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -8766,7 +8766,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -8783,7 +8783,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8797,7 +8797,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8814,7 +8814,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli 
zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8828,7 +8828,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8845,7 +8845,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8859,7 +8859,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8877,7 +8877,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8892,7 +8892,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8910,7 +8910,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8925,7 +8925,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8943,7 +8943,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8958,7 +8958,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8977,7 +8977,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8993,7 +8993,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9012,7 +9012,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9028,7 +9028,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; 
CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9047,7 +9047,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9063,7 +9063,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9083,7 +9083,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9100,7 +9100,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9120,7 +9120,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9137,7 +9137,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9157,7 +9157,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9174,7 +9174,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9195,7 +9195,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9213,7 +9213,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9234,7 +9234,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9252,7 +9252,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, 
ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9273,7 +9273,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9291,7 +9291,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9313,7 +9313,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9332,7 +9332,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9354,7 +9354,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9373,7 +9373,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9395,7 +9395,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9414,7 +9414,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9431,7 +9431,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -9445,7 +9445,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -9462,7 +9462,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -9476,7 +9476,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; 
CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -9493,7 +9493,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -9507,7 +9507,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -9524,7 +9524,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9538,7 +9538,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9555,7 +9555,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9569,7 +9569,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9586,7 +9586,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9600,7 +9600,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9618,7 +9618,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9633,7 +9633,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9651,7 +9651,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9666,7 +9666,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret 
entry: @@ -9684,7 +9684,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9699,7 +9699,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9718,7 +9718,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9734,7 +9734,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9753,7 +9753,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9769,7 +9769,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9788,7 +9788,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9804,7 +9804,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9824,7 +9824,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9841,7 +9841,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9861,7 +9861,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9878,7 +9878,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9898,7 +9898,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; 
CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9915,7 +9915,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9936,7 +9936,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9954,7 +9954,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9975,7 +9975,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9993,7 +9993,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10014,7 +10014,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10032,7 +10032,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10054,7 +10054,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10073,7 +10073,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10095,7 +10095,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10114,7 +10114,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10136,7 +10136,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: 
vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10155,7 +10155,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10172,7 +10172,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -10186,7 +10186,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -10203,7 +10203,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -10217,7 +10217,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -10234,7 +10234,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -10248,7 +10248,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -10265,7 +10265,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10279,7 +10279,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10296,7 +10296,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10310,7 +10310,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10327,7 +10327,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, 
a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10341,7 +10341,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10359,7 +10359,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10374,7 +10374,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10392,7 +10392,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10407,7 +10407,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10425,7 +10425,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10440,7 +10440,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10459,7 +10459,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10475,7 +10475,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10494,7 +10494,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10510,7 +10510,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10529,7 +10529,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v 
v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10545,7 +10545,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10565,7 +10565,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10582,7 +10582,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10602,7 +10602,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10619,7 +10619,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10639,7 +10639,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10656,7 +10656,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10677,7 +10677,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10695,7 +10695,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10716,7 +10716,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10734,7 +10734,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10755,7 +10755,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10773,7 +10773,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; 
CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10795,7 +10795,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10814,7 +10814,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10836,7 +10836,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10855,7 +10855,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10877,7 +10877,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10896,7 +10896,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10913,7 +10913,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -10927,7 +10927,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -10944,7 +10944,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -10958,7 +10958,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -10974,7 +10974,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: 
vsuxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -10987,7 +10987,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -11004,7 +11004,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11018,7 +11018,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11035,7 +11035,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11049,7 +11049,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11066,7 +11066,7 @@ ; CHECK-NEXT: vmv2r.v v16, v8 ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -11080,7 +11080,7 @@ ; CHECK-NEXT: vmv2r.v v16, v8 ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -11098,7 +11098,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11113,7 +11113,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11131,7 +11131,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11146,7 +11146,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11164,7 +11164,7 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -11179,7 +11179,7 @@ ; CHECK-NEXT: 
vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -11196,7 +11196,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -11210,7 +11210,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -11227,7 +11227,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -11241,7 +11241,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -11258,7 +11258,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -11272,7 +11272,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -11289,7 +11289,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -11303,7 +11303,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -11320,7 +11320,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -11334,7 +11334,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), 
v12, v0.t ; CHECK-NEXT: ret entry: @@ -11351,7 +11351,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -11365,7 +11365,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -11382,7 +11382,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11396,7 +11396,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11413,7 +11413,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11427,7 +11427,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11444,7 +11444,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11458,7 +11458,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11476,7 +11476,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11491,7 +11491,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11509,7 +11509,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11524,7 +11524,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: 
ret entry: @@ -11542,7 +11542,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11557,7 +11557,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11574,7 +11574,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11588,7 +11588,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11605,7 +11605,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11619,7 +11619,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11635,7 +11635,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11648,7 +11648,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11665,7 +11665,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -11679,7 +11679,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -11696,7 +11696,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -11710,7 +11710,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, 
(a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -11727,7 +11727,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11741,7 +11741,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11759,7 +11759,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -11774,7 +11774,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -11792,7 +11792,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -11807,7 +11807,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -11825,7 +11825,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11840,7 +11840,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11859,7 +11859,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -11875,7 +11875,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -11894,7 +11894,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -11910,7 +11910,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -11929,7 +11929,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, 
v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11945,7 +11945,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11965,7 +11965,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -11982,7 +11982,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12002,7 +12002,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12019,7 +12019,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12039,7 +12039,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -12056,7 +12056,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -12077,7 +12077,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12095,7 +12095,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12116,7 +12116,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12134,7 +12134,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12155,7 +12155,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, 
m1, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -12173,7 +12173,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -12195,7 +12195,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12214,7 +12214,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12236,7 +12236,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12255,7 +12255,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12277,7 +12277,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -12296,7 +12296,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -12313,7 +12313,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -12327,7 +12327,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -12344,7 +12344,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -12358,7 +12358,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -12375,7 +12375,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, 
mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -12389,7 +12389,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -12406,7 +12406,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12420,7 +12420,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12437,7 +12437,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12451,7 +12451,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12468,7 +12468,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12482,7 +12482,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12500,7 +12500,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12515,7 +12515,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12533,7 +12533,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12548,7 +12548,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12566,7 +12566,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: 
vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12581,7 +12581,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12600,7 +12600,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12616,7 +12616,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12635,7 +12635,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12651,7 +12651,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12670,7 +12670,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12686,7 +12686,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12706,7 +12706,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12723,7 +12723,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12743,7 +12743,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12760,7 +12760,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12780,7 +12780,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12797,7 +12797,7 @@ ; CHECK-NEXT: vmv1r.v 
v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12818,7 +12818,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12836,7 +12836,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12857,7 +12857,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12875,7 +12875,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12896,7 +12896,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12914,7 +12914,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12936,7 +12936,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12955,7 +12955,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12977,7 +12977,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12996,7 +12996,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13018,7 +13018,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -13037,7 +13037,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, 
a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13054,7 +13054,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -13068,7 +13068,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -13085,7 +13085,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -13099,7 +13099,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -13116,7 +13116,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -13130,7 +13130,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -13147,7 +13147,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -13161,7 +13161,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -13178,7 +13178,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -13192,7 +13192,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -13209,7 +13209,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10 ; 
CHECK-NEXT: ret entry: @@ -13223,7 +13223,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -13241,7 +13241,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -13256,7 +13256,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -13274,7 +13274,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -13289,7 +13289,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -13307,7 +13307,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -13322,7 +13322,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll @@ -11,7 +11,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -25,7 +25,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -42,7 +42,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -56,7 +56,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -72,7 
+72,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -85,7 +85,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -102,7 +102,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -116,7 +116,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -133,7 +133,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -147,7 +147,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -163,7 +163,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -176,7 +176,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -193,7 +193,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -207,7 +207,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -224,7 +224,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -238,7 +238,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; 
CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -255,7 +255,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -269,7 +269,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -286,7 +286,7 @@ ; CHECK-NEXT: vmv2r.v v16, v8 ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -300,7 +300,7 @@ ; CHECK-NEXT: vmv2r.v v16, v8 ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -317,7 +317,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -331,7 +331,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -349,7 +349,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -364,7 +364,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -382,7 +382,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -397,7 +397,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -415,7 +415,7 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -430,7 +430,7 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v16, (a0), 
v12, v0.t ; CHECK-NEXT: ret entry: @@ -448,7 +448,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -463,7 +463,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -479,7 +479,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -492,7 +492,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -509,7 +509,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -523,7 +523,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -539,7 +539,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -552,7 +552,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -569,7 +569,7 @@ ; CHECK-NEXT: vmv2r.v v16, v8 ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -583,7 +583,7 @@ ; CHECK-NEXT: vmv2r.v v16, v8 ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -600,7 +600,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -614,7 +614,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v12, 
(a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -631,7 +631,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -645,7 +645,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -663,7 +663,7 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -678,7 +678,7 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -696,7 +696,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -711,7 +711,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -729,7 +729,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -744,7 +744,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -761,7 +761,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -775,7 +775,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -792,7 +792,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -806,7 +806,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: 
vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -823,7 +823,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -837,7 +837,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -854,7 +854,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -868,7 +868,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -885,7 +885,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -899,7 +899,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -916,7 +916,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -930,7 +930,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -947,7 +947,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -961,7 +961,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -978,7 +978,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -992,7 +992,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret 
entry: @@ -1010,7 +1010,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1025,7 +1025,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1043,7 +1043,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1058,7 +1058,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1076,7 +1076,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1091,7 +1091,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1109,7 +1109,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1124,7 +1124,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1143,7 +1143,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1159,7 +1159,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1178,7 +1178,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1194,7 +1194,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1213,7 +1213,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli 
zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1229,7 +1229,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1248,7 +1248,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1264,7 +1264,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1284,7 +1284,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1301,7 +1301,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1321,7 +1321,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1338,7 +1338,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1358,7 +1358,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1375,7 +1375,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1395,7 +1395,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1412,7 +1412,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1433,7 +1433,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ 
-1451,7 +1451,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1472,7 +1472,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1490,7 +1490,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1511,7 +1511,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1529,7 +1529,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1550,7 +1550,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1568,7 +1568,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1590,7 +1590,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1609,7 +1609,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1631,7 +1631,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1650,7 +1650,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1672,7 +1672,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1691,7 +1691,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, 
e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1713,7 +1713,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1732,7 +1732,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1749,7 +1749,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -1763,7 +1763,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1780,7 +1780,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -1794,7 +1794,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1811,7 +1811,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -1825,7 +1825,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1842,7 +1842,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -1856,7 +1856,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -1873,7 +1873,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1887,7 +1887,7 @@ ; 
CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1904,7 +1904,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1918,7 +1918,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1935,7 +1935,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1949,7 +1949,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1966,7 +1966,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -1980,7 +1980,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -1998,7 +1998,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2013,7 +2013,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2031,7 +2031,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2046,7 +2046,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2064,7 +2064,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2079,7 +2079,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, 
mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2097,7 +2097,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2112,7 +2112,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2131,7 +2131,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2147,7 +2147,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2166,7 +2166,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2182,7 +2182,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2201,7 +2201,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2217,7 +2217,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2236,7 +2236,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2252,7 +2252,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2272,7 +2272,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2289,7 +2289,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: 
ret entry: @@ -2309,7 +2309,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2326,7 +2326,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2346,7 +2346,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2363,7 +2363,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2383,7 +2383,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2400,7 +2400,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2421,7 +2421,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2439,7 +2439,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2460,7 +2460,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2478,7 +2478,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2499,7 +2499,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2517,7 +2517,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2538,7 +2538,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, 
v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2556,7 +2556,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2578,7 +2578,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2597,7 +2597,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2619,7 +2619,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2638,7 +2638,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2660,7 +2660,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2679,7 +2679,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2701,7 +2701,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -2720,7 +2720,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -2737,7 +2737,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -2751,7 +2751,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -2768,7 +2768,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; 
CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -2782,7 +2782,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -2798,7 +2798,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -2811,7 +2811,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -2827,7 +2827,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -2840,7 +2840,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -2857,7 +2857,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -2871,7 +2871,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2888,7 +2888,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -2902,7 +2902,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -2919,7 +2919,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -2933,7 +2933,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -2950,7 +2950,7 @@ 
; CHECK-NEXT: vmv2r.v v16, v8 ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -2964,7 +2964,7 @@ ; CHECK-NEXT: vmv2r.v v16, v8 ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -2982,7 +2982,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -2997,7 +2997,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3015,7 +3015,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3030,7 +3030,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3048,7 +3048,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -3063,7 +3063,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -3081,7 +3081,7 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3096,7 +3096,7 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3112,7 +3112,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3125,7 +3125,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3142,7 +3142,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v 
v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3156,7 +3156,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3172,7 +3172,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3185,7 +3185,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3202,7 +3202,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3216,7 +3216,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3233,7 +3233,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3247,7 +3247,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3264,7 +3264,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3278,7 +3278,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3295,7 +3295,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3309,7 +3309,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3326,7 +3326,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; 
CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3340,7 +3340,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3358,7 +3358,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3373,7 +3373,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3391,7 +3391,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3406,7 +3406,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3424,7 +3424,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3439,7 +3439,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3457,7 +3457,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3472,7 +3472,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3491,7 +3491,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3507,7 +3507,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3526,7 +3526,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: 
vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3542,7 +3542,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3561,7 +3561,7 @@ ; CHECK-NEXT: vmv1r.v v18, v16 ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3577,7 +3577,7 @@ ; CHECK-NEXT: vmv1r.v v18, v16 ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3596,7 +3596,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3612,7 +3612,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3632,7 +3632,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3649,7 +3649,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3669,7 +3669,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3686,7 +3686,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3706,7 +3706,7 @@ ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3723,7 +3723,7 @@ ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3743,7 +3743,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3760,7 +3760,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; 
CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3781,7 +3781,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3799,7 +3799,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3820,7 +3820,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3838,7 +3838,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3859,7 +3859,7 @@ ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -3877,7 +3877,7 @@ ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -3898,7 +3898,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3916,7 +3916,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -3938,7 +3938,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -3957,7 +3957,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -3979,7 +3979,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -3998,7 +3998,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: 
vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4020,7 +4020,7 @@ ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -4039,7 +4039,7 @@ ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -4061,7 +4061,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4080,7 +4080,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4097,7 +4097,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -4111,7 +4111,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4128,7 +4128,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -4142,7 +4142,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4159,7 +4159,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -4173,7 +4173,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4190,7 +4190,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -4204,7 +4204,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: 
vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -4221,7 +4221,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4235,7 +4235,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4252,7 +4252,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4266,7 +4266,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4283,7 +4283,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4297,7 +4297,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4314,7 +4314,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4328,7 +4328,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4346,7 +4346,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4361,7 +4361,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4379,7 +4379,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4394,7 +4394,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: 
vsuxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4412,7 +4412,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4427,7 +4427,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4445,7 +4445,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4460,7 +4460,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4479,7 +4479,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4495,7 +4495,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4514,7 +4514,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4530,7 +4530,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4549,7 +4549,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4565,7 +4565,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4584,7 +4584,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4600,7 +4600,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4620,7 +4620,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: 
vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4637,7 +4637,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4657,7 +4657,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4674,7 +4674,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4694,7 +4694,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4711,7 +4711,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4731,7 +4731,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4748,7 +4748,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4769,7 +4769,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4787,7 +4787,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4808,7 +4808,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4826,7 +4826,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4847,7 +4847,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli 
zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4865,7 +4865,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4886,7 +4886,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4904,7 +4904,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4926,7 +4926,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4945,7 +4945,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -4967,7 +4967,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -4986,7 +4986,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5008,7 +5008,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5027,7 +5027,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5049,7 +5049,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5068,7 +5068,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5085,7 +5085,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: 
@@ -5099,7 +5099,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5116,7 +5116,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -5130,7 +5130,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5147,7 +5147,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -5161,7 +5161,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5177,7 +5177,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -5190,7 +5190,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5207,7 +5207,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5221,7 +5221,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5238,7 +5238,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5252,7 +5252,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5269,7 +5269,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, 
(a0), v9 ; CHECK-NEXT: ret entry: @@ -5283,7 +5283,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5300,7 +5300,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -5314,7 +5314,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5332,7 +5332,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5347,7 +5347,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5365,7 +5365,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5380,7 +5380,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5398,7 +5398,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5413,7 +5413,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5431,7 +5431,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -5446,7 +5446,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5465,7 +5465,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5481,7 +5481,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, 
v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5500,7 +5500,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5516,7 +5516,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5535,7 +5535,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5551,7 +5551,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5570,7 +5570,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -5586,7 +5586,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5606,7 +5606,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5623,7 +5623,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5643,7 +5643,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5660,7 +5660,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5680,7 +5680,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5697,7 +5697,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, 
v0.t ; CHECK-NEXT: ret entry: @@ -5717,7 +5717,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -5734,7 +5734,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5755,7 +5755,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5773,7 +5773,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5794,7 +5794,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5812,7 +5812,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5833,7 +5833,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5851,7 +5851,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5872,7 +5872,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -5890,7 +5890,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -5912,7 +5912,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5931,7 +5931,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5953,7 +5953,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -5972,7 +5972,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -5994,7 +5994,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6013,7 +6013,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6035,7 +6035,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -6054,7 +6054,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -6070,7 +6070,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -6083,7 +6083,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -6100,7 +6100,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -6114,7 +6114,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -6130,7 +6130,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -6143,7 +6143,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -6159,7 +6159,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: 
vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -6172,7 +6172,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -6189,7 +6189,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -6203,7 +6203,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -6220,7 +6220,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6234,7 +6234,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6251,7 +6251,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -6265,7 +6265,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -6282,7 +6282,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -6296,7 +6296,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -6314,7 +6314,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -6329,7 +6329,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -6347,7 +6347,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, 
m1, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6362,7 +6362,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6380,7 +6380,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -6395,7 +6395,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -6413,7 +6413,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -6428,7 +6428,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -6447,7 +6447,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -6463,7 +6463,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -6482,7 +6482,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6498,7 +6498,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6517,7 +6517,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -6533,7 +6533,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -6552,7 +6552,7 @@ ; CHECK-NEXT: vmv1r.v v18, v16 ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -6568,7 +6568,7 @@ ; CHECK-NEXT: vmv1r.v v18, v16 ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -6588,7 +6588,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -6605,7 +6605,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -6625,7 +6625,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6642,7 +6642,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6662,7 +6662,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -6679,7 +6679,7 @@ ; CHECK-NEXT: vmv1r.v v11, v8 ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -6699,7 +6699,7 @@ ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -6716,7 +6716,7 @@ ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -6737,7 +6737,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -6755,7 +6755,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -6776,7 +6776,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6794,7 +6794,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ 
-6815,7 +6815,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -6833,7 +6833,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -6854,7 +6854,7 @@ ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -6872,7 +6872,7 @@ ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -6894,7 +6894,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -6913,7 +6913,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -6935,7 +6935,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -6954,7 +6954,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -6976,7 +6976,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -6995,7 +6995,7 @@ ; CHECK-NEXT: vmv1r.v v13, v8 ; CHECK-NEXT: vmv1r.v v14, v8 ; CHECK-NEXT: vmv1r.v v15, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -7017,7 +7017,7 @@ ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -7036,7 +7036,7 @@ ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -7053,7 +7053,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: 
vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -7067,7 +7067,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -7084,7 +7084,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -7098,7 +7098,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -7115,7 +7115,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -7129,7 +7129,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -7146,7 +7146,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -7160,7 +7160,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -7176,7 +7176,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -7189,7 +7189,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -7206,7 +7206,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -7220,7 +7220,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: 
vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -7236,7 +7236,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -7249,7 +7249,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -7266,7 +7266,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -7280,7 +7280,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -7297,7 +7297,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -7311,7 +7311,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -7328,7 +7328,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7342,7 +7342,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7359,7 +7359,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -7373,7 +7373,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -7390,7 +7390,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7404,7 +7404,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, 
a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7422,7 +7422,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -7437,7 +7437,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -7455,7 +7455,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7470,7 +7470,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7488,7 +7488,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -7503,7 +7503,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -7521,7 +7521,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7536,7 +7536,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7555,7 +7555,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -7571,7 +7571,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -7590,7 +7590,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7606,7 +7606,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7625,7 
+7625,7 @@ ; CHECK-NEXT: vmv1r.v v18, v16 ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -7641,7 +7641,7 @@ ; CHECK-NEXT: vmv1r.v v18, v16 ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -7660,7 +7660,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7676,7 +7676,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7696,7 +7696,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -7713,7 +7713,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -7733,7 +7733,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7750,7 +7750,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7770,7 +7770,7 @@ ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -7787,7 +7787,7 @@ ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -7807,7 +7807,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7824,7 +7824,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7845,7 +7845,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, 
m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -7863,7 +7863,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -7884,7 +7884,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7902,7 +7902,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -7923,7 +7923,7 @@ ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -7941,7 +7941,7 @@ ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -7962,7 +7962,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -7980,7 +7980,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8002,7 +8002,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -8021,7 +8021,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -8043,7 +8043,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8062,7 +8062,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8084,7 +8084,7 @@ ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -8103,7 
+8103,7 @@ ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -8125,7 +8125,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8144,7 +8144,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8161,7 +8161,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -8175,7 +8175,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -8192,7 +8192,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -8206,7 +8206,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -8223,7 +8223,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -8237,7 +8237,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -8254,7 +8254,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -8268,7 +8268,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -8285,7 +8285,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: 
vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8299,7 +8299,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8316,7 +8316,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8330,7 +8330,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8347,7 +8347,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8361,7 +8361,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8378,7 +8378,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8392,7 +8392,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8410,7 +8410,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8425,7 +8425,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8443,7 +8443,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8458,7 +8458,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8476,7 +8476,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8491,7 +8491,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 
; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8509,7 +8509,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8524,7 +8524,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8543,7 +8543,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8559,7 +8559,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8578,7 +8578,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8594,7 +8594,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8613,7 +8613,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8629,7 +8629,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8648,7 +8648,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8664,7 +8664,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8684,7 +8684,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8701,7 +8701,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, 
mf8, ta, ma ; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8721,7 +8721,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8738,7 +8738,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8758,7 +8758,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8775,7 +8775,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8795,7 +8795,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8812,7 +8812,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8833,7 +8833,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8851,7 +8851,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8872,7 +8872,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8890,7 +8890,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8911,7 +8911,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8929,7 +8929,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8950,7 +8950,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; 
CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -8968,7 +8968,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -8990,7 +8990,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9009,7 +9009,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9031,7 +9031,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9050,7 +9050,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9072,7 +9072,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9091,7 +9091,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9113,7 +9113,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9132,7 +9132,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9149,7 +9149,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -9163,7 +9163,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -9180,7 +9180,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v 
v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -9194,7 +9194,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -9211,7 +9211,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -9225,7 +9225,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -9241,7 +9241,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -9254,7 +9254,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -9271,7 +9271,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9285,7 +9285,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9302,7 +9302,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9316,7 +9316,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9333,7 +9333,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9347,7 +9347,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9364,7 +9364,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli 
zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -9378,7 +9378,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -9396,7 +9396,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9411,7 +9411,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9429,7 +9429,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9444,7 +9444,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9462,7 +9462,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9477,7 +9477,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9495,7 +9495,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -9510,7 +9510,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -9529,7 +9529,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9545,7 +9545,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9564,7 +9564,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ 
-9580,7 +9580,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9599,7 +9599,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9615,7 +9615,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9634,7 +9634,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -9650,7 +9650,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -9670,7 +9670,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9687,7 +9687,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9707,7 +9707,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9724,7 +9724,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9744,7 +9744,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9761,7 +9761,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9781,7 +9781,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -9798,7 +9798,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, 
e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -9819,7 +9819,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9837,7 +9837,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9858,7 +9858,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9876,7 +9876,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9897,7 +9897,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9915,7 +9915,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -9936,7 +9936,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -9954,7 +9954,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -9976,7 +9976,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -9995,7 +9995,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10017,7 +10017,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10036,7 +10036,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ 
-10058,7 +10058,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10077,7 +10077,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10099,7 +10099,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -10118,7 +10118,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -10135,7 +10135,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -10149,7 +10149,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -10166,7 +10166,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -10180,7 +10180,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -10196,7 +10196,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -10209,7 +10209,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -10226,7 +10226,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -10240,7 +10240,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: 
vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -10256,7 +10256,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -10269,7 +10269,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -10286,7 +10286,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -10300,7 +10300,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -10317,7 +10317,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -10331,7 +10331,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -10348,7 +10348,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -10362,7 +10362,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -10379,7 +10379,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -10393,7 +10393,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -10409,7 +10409,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; 
CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -10422,7 +10422,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -10439,7 +10439,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10453,7 +10453,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10470,7 +10470,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10484,7 +10484,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10501,7 +10501,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10515,7 +10515,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10532,7 +10532,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -10546,7 +10546,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -10564,7 +10564,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10579,7 +10579,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10597,7 +10597,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, 
ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10612,7 +10612,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10630,7 +10630,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10645,7 +10645,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10663,7 +10663,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -10678,7 +10678,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -10697,7 +10697,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10713,7 +10713,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10732,7 +10732,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10748,7 +10748,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10767,7 +10767,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10783,7 +10783,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10802,7 +10802,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -10818,7 +10818,7 @@ 
; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -10838,7 +10838,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10855,7 +10855,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10875,7 +10875,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10892,7 +10892,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10912,7 +10912,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -10929,7 +10929,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -10949,7 +10949,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -10966,7 +10966,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -10987,7 +10987,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -11005,7 +11005,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -11026,7 +11026,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -11044,7 +11044,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; 
CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -11065,7 +11065,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -11083,7 +11083,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -11104,7 +11104,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11122,7 +11122,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11144,7 +11144,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -11163,7 +11163,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -11185,7 +11185,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -11204,7 +11204,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -11226,7 +11226,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -11245,7 +11245,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -11267,7 +11267,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11286,7 +11286,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; 
CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11303,7 +11303,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -11317,7 +11317,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -11334,7 +11334,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -11348,7 +11348,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -11365,7 +11365,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -11379,7 +11379,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -11396,7 +11396,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -11410,7 +11410,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -11427,7 +11427,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11441,7 +11441,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11458,7 +11458,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: 
vsuxseg3ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11472,7 +11472,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11489,7 +11489,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11503,7 +11503,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11520,7 +11520,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11534,7 +11534,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11552,7 +11552,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11567,7 +11567,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11585,7 +11585,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11600,7 +11600,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11618,7 +11618,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11633,7 +11633,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11651,7 +11651,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11666,7 +11666,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; 
CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11683,7 +11683,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -11697,7 +11697,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -11714,7 +11714,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -11728,7 +11728,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -11744,7 +11744,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -11757,7 +11757,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -11774,7 +11774,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -11788,7 +11788,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -11805,7 +11805,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -11819,7 +11819,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -11836,7 +11836,7 @@ ; 
CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -11850,7 +11850,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -11867,7 +11867,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -11881,7 +11881,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -11898,7 +11898,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11912,7 +11912,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11929,7 +11929,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11943,7 +11943,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11960,7 +11960,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -11974,7 +11974,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -11991,7 +11991,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -12005,7 +12005,7 
@@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -12022,7 +12022,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12036,7 +12036,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12053,7 +12053,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12067,7 +12067,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12084,7 +12084,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12098,7 +12098,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12115,7 +12115,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12129,7 +12129,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12147,7 +12147,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12162,7 +12162,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12180,7 +12180,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12195,7 +12195,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; 
CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12213,7 +12213,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12228,7 +12228,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12246,7 +12246,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12261,7 +12261,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12280,7 +12280,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12296,7 +12296,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12315,7 +12315,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12331,7 +12331,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12350,7 +12350,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12366,7 +12366,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12385,7 +12385,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12401,7 +12401,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v 
v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12421,7 +12421,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12438,7 +12438,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12458,7 +12458,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12475,7 +12475,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12495,7 +12495,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12512,7 +12512,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12532,7 +12532,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12549,7 +12549,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12570,7 +12570,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12588,7 +12588,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12609,7 +12609,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12627,7 +12627,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12648,7 +12648,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v 
v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12666,7 +12666,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12687,7 +12687,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12705,7 +12705,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12727,7 +12727,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12746,7 +12746,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12768,7 +12768,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12787,7 +12787,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12809,7 +12809,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12828,7 +12828,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12850,7 +12850,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -12869,7 +12869,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -12886,7 +12886,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; 
CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -12900,7 +12900,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -12917,7 +12917,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -12931,7 +12931,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -12948,7 +12948,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -12962,7 +12962,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -12978,7 +12978,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -12991,7 +12991,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -13008,7 +13008,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -13022,7 +13022,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13039,7 +13039,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -13053,7 +13053,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13070,7 +13070,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v 
v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -13084,7 +13084,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13101,7 +13101,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -13115,7 +13115,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -13133,7 +13133,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -13148,7 +13148,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13166,7 +13166,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -13181,7 +13181,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13199,7 +13199,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -13214,7 +13214,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13232,7 +13232,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -13247,7 +13247,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -13266,7 +13266,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: 
vsuxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -13282,7 +13282,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13301,7 +13301,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -13317,7 +13317,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13336,7 +13336,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -13352,7 +13352,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13371,7 +13371,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -13387,7 +13387,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -13407,7 +13407,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -13424,7 +13424,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13444,7 +13444,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -13461,7 +13461,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13481,7 +13481,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -13498,7 +13498,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: 
vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13518,7 +13518,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -13535,7 +13535,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -13556,7 +13556,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -13574,7 +13574,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13595,7 +13595,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -13613,7 +13613,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13634,7 +13634,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -13652,7 +13652,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13673,7 +13673,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -13691,7 +13691,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -13713,7 +13713,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -13732,7 +13732,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli 
zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13754,7 +13754,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -13773,7 +13773,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13795,7 +13795,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -13814,7 +13814,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -13836,7 +13836,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -13855,7 +13855,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -13872,7 +13872,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -13886,7 +13886,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -13903,7 +13903,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -13917,7 +13917,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -13934,7 +13934,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -13948,7 +13948,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v 
v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -13965,7 +13965,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -13979,7 +13979,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -13996,7 +13996,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14010,7 +14010,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14027,7 +14027,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14041,7 +14041,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14058,7 +14058,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14072,7 +14072,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14089,7 +14089,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14103,7 +14103,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14121,7 +14121,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14136,7 +14136,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, 
mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14154,7 +14154,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14169,7 +14169,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14187,7 +14187,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14202,7 +14202,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14220,7 +14220,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14235,7 +14235,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14254,7 +14254,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14270,7 +14270,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14289,7 +14289,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14305,7 +14305,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14324,7 +14324,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14340,7 +14340,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, 
(a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14359,7 +14359,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14375,7 +14375,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14395,7 +14395,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14412,7 +14412,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14432,7 +14432,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14449,7 +14449,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14469,7 +14469,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14486,7 +14486,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14506,7 +14506,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14523,7 +14523,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14544,7 +14544,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14562,7 +14562,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14583,7 +14583,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; 
CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14601,7 +14601,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14622,7 +14622,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14640,7 +14640,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14661,7 +14661,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14679,7 +14679,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14701,7 +14701,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14720,7 +14720,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14742,7 +14742,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14761,7 +14761,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14783,7 +14783,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14802,7 +14802,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14824,7 +14824,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, 
mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14843,7 +14843,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -14860,7 +14860,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -14874,7 +14874,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -14891,7 +14891,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -14905,7 +14905,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -14922,7 +14922,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -14936,7 +14936,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -14953,7 +14953,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -14967,7 +14967,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -14984,7 +14984,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -14998,7 +14998,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ 
-15015,7 +15015,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15029,7 +15029,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15046,7 +15046,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15060,7 +15060,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15077,7 +15077,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15091,7 +15091,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15109,7 +15109,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15124,7 +15124,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15142,7 +15142,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15157,7 +15157,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15175,7 +15175,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15190,7 +15190,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15208,7 +15208,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v 
v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15223,7 +15223,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15242,7 +15242,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15258,7 +15258,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15277,7 +15277,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15293,7 +15293,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15312,7 +15312,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15328,7 +15328,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15347,7 +15347,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15363,7 +15363,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15383,7 +15383,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15400,7 +15400,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15420,7 +15420,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, 
ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15437,7 +15437,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15457,7 +15457,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15474,7 +15474,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15494,7 +15494,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15511,7 +15511,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15532,7 +15532,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15550,7 +15550,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15571,7 +15571,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15589,7 +15589,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15610,7 +15610,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15628,7 +15628,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15649,7 +15649,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15667,7 +15667,7 @@ ; 
CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15689,7 +15689,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15708,7 +15708,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15730,7 +15730,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15749,7 +15749,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15771,7 +15771,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15790,7 +15790,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15812,7 +15812,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -15831,7 +15831,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -15848,7 +15848,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -15862,7 +15862,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -15879,7 +15879,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: 
ret entry: @@ -15893,7 +15893,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -15909,7 +15909,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -15922,7 +15922,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -15938,7 +15938,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -15951,7 +15951,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -15968,7 +15968,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -15982,7 +15982,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -15999,7 +15999,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -16013,7 +16013,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -16030,7 +16030,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -16044,7 +16044,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2 ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -16061,7 +16061,7 @@ ; CHECK-NEXT: vmv2r.v v16, v8 ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: 
vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -16075,7 +16075,7 @@ ; CHECK-NEXT: vmv2r.v v16, v8 ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -16093,7 +16093,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -16108,7 +16108,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -16126,7 +16126,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -16141,7 +16141,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -16159,7 +16159,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -16174,7 +16174,7 @@ ; CHECK-NEXT: vmv2r.v v10, v8 ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -16192,7 +16192,7 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -16207,7 +16207,7 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -16224,7 +16224,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -16238,7 +16238,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -16255,7 +16255,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, 
ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -16269,7 +16269,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -16285,7 +16285,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -16298,7 +16298,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -16315,7 +16315,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -16329,7 +16329,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -16346,7 +16346,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -16360,7 +16360,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -16377,7 +16377,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -16391,7 +16391,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -16408,7 +16408,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -16422,7 +16422,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; 
CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -16439,7 +16439,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -16453,7 +16453,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -16470,7 +16470,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -16484,7 +16484,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -16501,7 +16501,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -16515,7 +16515,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -16532,7 +16532,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -16546,7 +16546,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -16563,7 +16563,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -16577,7 +16577,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -16595,7 +16595,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -16610,7 +16610,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -16628,7 +16628,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -16643,7 +16643,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -16661,7 +16661,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -16676,7 +16676,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -16694,7 +16694,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -16709,7 +16709,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -16725,7 +16725,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -16738,7 +16738,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -16755,7 +16755,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -16769,7 +16769,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -16785,7 +16785,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -16798,7 +16798,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v 
v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -16815,7 +16815,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -16829,7 +16829,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -16846,7 +16846,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -16860,7 +16860,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -16877,7 +16877,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -16891,7 +16891,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -16908,7 +16908,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -16922,7 +16922,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10 ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -16939,7 +16939,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -16953,7 +16953,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -16971,7 +16971,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -16986,7 +16986,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; 
CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -17004,7 +17004,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -17019,7 +17019,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -17037,7 +17037,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -17052,7 +17052,7 @@ ; CHECK-NEXT: vmv1r.v v9, v8 ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -17070,7 +17070,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -17085,7 +17085,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -17104,7 +17104,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -17120,7 +17120,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -17139,7 +17139,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -17155,7 +17155,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -17174,7 +17174,7 @@ ; CHECK-NEXT: vmv1r.v v18, v16 ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -17190,7 +17190,7 @@ ; CHECK-NEXT: vmv1r.v v18, v16 ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei64.v v16, 
(a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -17209,7 +17209,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -17225,7 +17225,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -17245,7 +17245,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -17262,7 +17262,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -17282,7 +17282,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -17299,7 +17299,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -17319,7 +17319,7 @@ ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -17336,7 +17336,7 @@ ; CHECK-NEXT: vmv1r.v v19, v16 ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -17356,7 +17356,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -17373,7 +17373,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -17394,7 +17394,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -17412,7 +17412,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -17433,7 +17433,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v 
v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -17451,7 +17451,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -17472,7 +17472,7 @@ ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -17490,7 +17490,7 @@ ; CHECK-NEXT: vmv1r.v v20, v16 ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -17511,7 +17511,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -17529,7 +17529,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -17551,7 +17551,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -17570,7 +17570,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -17592,7 +17592,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -17611,7 +17611,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -17633,7 +17633,7 @@ ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -17652,7 +17652,7 @@ ; CHECK-NEXT: vmv1r.v v21, v16 ; CHECK-NEXT: vmv1r.v v22, v16 ; CHECK-NEXT: vmv1r.v v23, v16 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -17674,7 +17674,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, 
a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -17693,7 +17693,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -17710,7 +17710,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -17724,7 +17724,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -17741,7 +17741,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -17755,7 +17755,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -17772,7 +17772,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -17786,7 +17786,7 @@ ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9 ; CHECK-NEXT: vmv1r.v v10, v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -17802,7 +17802,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10 ; CHECK-NEXT: ret entry: @@ -17815,7 +17815,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9 ; CHECK-NEXT: vmv1r.v v9, v8 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -17832,7 +17832,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -17846,7 +17846,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -17863,7 +17863,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v 
v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -17877,7 +17877,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -17894,7 +17894,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -17908,7 +17908,7 @@ ; CHECK-NEXT: vmv1r.v v10, v8 ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -17925,7 +17925,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -17939,7 +17939,7 @@ ; CHECK-NEXT: vmv1r.v v12, v8 ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -17957,7 +17957,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -17972,7 +17972,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -17990,7 +17990,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -18005,7 +18005,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -18023,7 +18023,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -18038,7 +18038,7 @@ ; CHECK-NEXT: vmv1r.v v11, v10 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -18056,7 +18056,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: 
vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -18071,7 +18071,7 @@ ; CHECK-NEXT: vmv1r.v v13, v12 ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -18090,7 +18090,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -18106,7 +18106,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -18125,7 +18125,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -18141,7 +18141,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -18160,7 +18160,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -18176,7 +18176,7 @@ ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -18195,7 +18195,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -18211,7 +18211,7 @@ ; CHECK-NEXT: vmv1r.v v14, v12 ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -18231,7 +18231,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -18248,7 +18248,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -18268,7 +18268,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9 ; CHECK-NEXT: ret 
entry: @@ -18285,7 +18285,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -18305,7 +18305,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -18322,7 +18322,7 @@ ; CHECK-NEXT: vmv1r.v v13, v10 ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -18342,7 +18342,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -18359,7 +18359,7 @@ ; CHECK-NEXT: vmv1r.v v15, v12 ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -18380,7 +18380,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -18398,7 +18398,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -18419,7 +18419,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -18437,7 +18437,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -18458,7 +18458,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -18476,7 +18476,7 @@ ; CHECK-NEXT: vmv1r.v v14, v10 ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -18497,7 +18497,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -18515,7 +18515,7 @@ ; CHECK-NEXT: vmv1r.v v16, v12 ; CHECK-NEXT: vmv1r.v v17, v12 ; 
CHECK-NEXT: vmv1r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -18537,7 +18537,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -18556,7 +18556,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -18578,7 +18578,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -18597,7 +18597,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -18619,7 +18619,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9 ; CHECK-NEXT: ret entry: @@ -18638,7 +18638,7 @@ ; CHECK-NEXT: vmv1r.v v15, v10 ; CHECK-NEXT: vmv1r.v v16, v10 ; CHECK-NEXT: vmv1r.v v17, v10 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t ; CHECK-NEXT: ret entry: @@ -18660,7 +18660,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -18679,7 +18679,7 @@ ; CHECK-NEXT: vmv1r.v v17, v12 ; CHECK-NEXT: vmv1r.v v18, v12 ; CHECK-NEXT: vmv1r.v v19, v12 -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -18696,7 +18696,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -18710,7 +18710,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -18727,7 +18727,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -18741,7 +18741,7 @@ ; CHECK-NEXT: # kill: 
def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -18757,7 +18757,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -18770,7 +18770,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -18787,7 +18787,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12 ; CHECK-NEXT: ret entry: @@ -18801,7 +18801,7 @@ ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2 ; CHECK-NEXT: vmv1r.v v12, v10 ; CHECK-NEXT: vmv2r.v v10, v8 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -18818,7 +18818,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -18832,7 +18832,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -18849,7 +18849,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -18863,7 +18863,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -18880,7 +18880,7 @@ ; CHECK-NEXT: vmv2r.v v16, v8 ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -18894,7 +18894,7 @@ ; CHECK-NEXT: vmv2r.v v16, v8 ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -18911,7 +18911,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10 ; 
CHECK-NEXT: ret entry: @@ -18925,7 +18925,7 @@ ; CHECK-NEXT: vmv2r.v v12, v8 ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -18943,7 +18943,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -18958,7 +18958,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -18976,7 +18976,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -18991,7 +18991,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: @@ -19009,7 +19009,7 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v16, (a0), v12 ; CHECK-NEXT: ret entry: @@ -19024,7 +19024,7 @@ ; CHECK-NEXT: vmv2r.v v18, v16 ; CHECK-NEXT: vmv2r.v v20, v16 ; CHECK-NEXT: vmv2r.v v22, v16 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei64.v v16, (a0), v12, v0.t ; CHECK-NEXT: ret entry: @@ -19042,7 +19042,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10 ; CHECK-NEXT: ret entry: @@ -19057,7 +19057,7 @@ ; CHECK-NEXT: vmv2r.v v14, v12 ; CHECK-NEXT: vmv2r.v v16, v12 ; CHECK-NEXT: vmv2r.v v18, v12 -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp-mask.ll --- a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp-mask.ll @@ -19,7 +19,7 @@ define @vtrunc_nxv2i1_nxv2i16_unmasked( %a, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i1_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -44,7 +44,7 @@ define @vtrunc_nxv2i1_nxv2i32_unmasked( %a, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i1_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret @@ -70,7 +70,7 @@ define @vtrunc_nxv2i1_nxv2i64_unmasked( %a, i32 zeroext %vl) { 
; CHECK-LABEL: vtrunc_nxv2i1_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll @@ -41,7 +41,7 @@ define @vtrunc_nxv2i8_nxv2i16_unmasked( %a, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i8_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %v = call @llvm.vp.trunc.nxv2i8.nxv2i16( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) @@ -65,9 +65,9 @@ define @vtrunc_nxv2i8_nxv2i32_unmasked( %a, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i8_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %v = call @llvm.vp.trunc.nxv2i8.nxv2i32( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) @@ -93,11 +93,11 @@ define @vtrunc_nxv2i8_nxv2i64_unmasked( %a, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i8_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v10, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %v = call @llvm.vp.trunc.nxv2i8.nxv2i64( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) @@ -119,7 +119,7 @@ define @vtrunc_nxv2i16_nxv2i32_unmasked( %a, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i16_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %v = call @llvm.vp.trunc.nxv2i16.nxv2i32( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) @@ -143,9 +143,9 @@ define @vtrunc_nxv2i16_nxv2i64_unmasked( %a, i32 zeroext %vl) { ; CHECK-LABEL: vtrunc_nxv2i16_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v10, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: ret %v = call @llvm.vp.trunc.nxv2i16.nxv2i64( %a, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %vl) @@ -161,7 +161,7 @@ ; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a4, a1, 3 -; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma ; CHECK-NEXT: sub a3, a0, a1 ; CHECK-NEXT: vslidedown.vx v0, v0, a4 ; CHECK-NEXT: bltu a0, a3, .LBB12_2 @@ -203,7 +203,7 @@ define @vtrunc_nxv2i32_nxv2i64_unmasked( %a, i32 zeroext %vl) { ; CHECK-LABEL: 
vtrunc_nxv2i32_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v10, v8, 0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -220,7 +220,7 @@ ; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a4, a1, 2 -; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma ; CHECK-NEXT: slli a1, a1, 1 ; CHECK-NEXT: sub a3, a0, a1 ; CHECK-NEXT: vslidedown.vx v0, v0, a4 @@ -256,7 +256,7 @@ ; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a4, a1, 2 -; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma ; CHECK-NEXT: slli a1, a1, 1 ; CHECK-NEXT: sub a3, a0, a1 ; CHECK-NEXT: vslidedown.vx v0, v0, a4 @@ -308,7 +308,7 @@ ; CHECK-NEXT: mv a5, a4 ; CHECK-NEXT: .LBB17_2: ; CHECK-NEXT: li a6, 0 -; CHECK-NEXT: vsetvli a7, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a7, zero, e8, mf4, ta, ma ; CHECK-NEXT: sub a7, a5, a1 ; CHECK-NEXT: vslidedown.vx v0, v24, a3 ; CHECK-NEXT: bltu a5, a7, .LBB17_4 @@ -324,7 +324,7 @@ ; CHECK-NEXT: mv a5, a1 ; CHECK-NEXT: .LBB17_6: ; CHECK-NEXT: li a6, 0 -; CHECK-NEXT: vsetvli t1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli t1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v1, v24, a7 ; CHECK-NEXT: add a7, a0, t0 ; CHECK-NEXT: vsetvli zero, a5, e32, m4, ta, mu @@ -340,7 +340,7 @@ ; CHECK-NEXT: # %bb.7: ; CHECK-NEXT: mv a6, a4 ; CHECK-NEXT: .LBB17_8: -; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, ma ; CHECK-NEXT: vl8re64.v v16, (a7) ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: slli a2, a2, 3 diff --git a/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode.ll @@ -5,7 +5,7 @@ define @vtrunc_nxv1i16_nxv1i8( %va) { ; CHECK-LABEL: vtrunc_nxv1i16_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %tvec = trunc %va to @@ -15,7 +15,7 @@ define @vtrunc_nxv2i16_nxv2i8( %va) { ; CHECK-LABEL: vtrunc_nxv2i16_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %tvec = trunc %va to @@ -25,7 +25,7 @@ define @vtrunc_nxv4i16_nxv4i8( %va) { ; CHECK-LABEL: vtrunc_nxv4i16_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %tvec = trunc %va to @@ -35,7 +35,7 @@ define @vtrunc_nxv8i16_nxv8i8( %va) { ; CHECK-LABEL: vtrunc_nxv8i16_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v10, v8, 0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -46,7 +46,7 @@ define @vtrunc_nxv16i16_nxv16i8( %va) { ; CHECK-LABEL: vtrunc_nxv16i16_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vnsrl.wi v12, v8, 0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -57,9 +57,9 @@ define @vtrunc_nxv1i32_nxv1i8( %va) { ; CHECK-LABEL: vtrunc_nxv1i32_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma 
; CHECK-NEXT: vnsrl.wi v8, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %tvec = trunc %va to @@ -69,7 +69,7 @@ define @vtrunc_nxv1i32_nxv1i16( %va) { ; CHECK-LABEL: vtrunc_nxv1i32_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %tvec = trunc %va to @@ -79,9 +79,9 @@ define @vtrunc_nxv2i32_nxv2i8( %va) { ; CHECK-LABEL: vtrunc_nxv2i32_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %tvec = trunc %va to @@ -91,7 +91,7 @@ define @vtrunc_nxv2i32_nxv2i16( %va) { ; CHECK-LABEL: vtrunc_nxv2i32_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %tvec = trunc %va to @@ -101,9 +101,9 @@ define @vtrunc_nxv4i32_nxv4i8( %va) { ; CHECK-LABEL: vtrunc_nxv4i32_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v10, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: ret %tvec = trunc %va to @@ -113,7 +113,7 @@ define @vtrunc_nxv4i32_nxv4i16( %va) { ; CHECK-LABEL: vtrunc_nxv4i32_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v10, v8, 0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -124,9 +124,9 @@ define @vtrunc_nxv8i32_nxv8i8( %va) { ; CHECK-LABEL: vtrunc_nxv8i32_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vnsrl.wi v12, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v12, 0 ; CHECK-NEXT: ret %tvec = trunc %va to @@ -136,7 +136,7 @@ define @vtrunc_nxv8i32_nxv8i16( %va) { ; CHECK-LABEL: vtrunc_nxv8i32_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vnsrl.wi v12, v8, 0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -147,9 +147,9 @@ define @vtrunc_nxv16i32_nxv16i8( %va) { ; CHECK-LABEL: vtrunc_nxv16i32_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vnsrl.wi v16, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v16, 0 ; CHECK-NEXT: ret %tvec = trunc %va to @@ -159,7 +159,7 @@ define @vtrunc_nxv16i32_nxv16i16( %va) { ; CHECK-LABEL: vtrunc_nxv16i32_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vnsrl.wi v16, v8, 0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -170,11 +170,11 @@ define @vtrunc_nxv1i64_nxv1i8( %va) { ; CHECK-LABEL: vtrunc_nxv1i64_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, 
e32, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %tvec = trunc %va to @@ -184,9 +184,9 @@ define @vtrunc_nxv1i64_nxv1i16( %va) { ; CHECK-LABEL: vtrunc_nxv1i64_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %tvec = trunc %va to @@ -196,7 +196,7 @@ define @vtrunc_nxv1i64_nxv1i32( %va) { ; CHECK-LABEL: vtrunc_nxv1i64_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %tvec = trunc %va to @@ -206,11 +206,11 @@ define @vtrunc_nxv2i64_nxv2i8( %va) { ; CHECK-LABEL: vtrunc_nxv2i64_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v10, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v10, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %tvec = trunc %va to @@ -220,9 +220,9 @@ define @vtrunc_nxv2i64_nxv2i16( %va) { ; CHECK-LABEL: vtrunc_nxv2i64_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v10, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: ret %tvec = trunc %va to @@ -232,7 +232,7 @@ define @vtrunc_nxv2i64_nxv2i32( %va) { ; CHECK-LABEL: vtrunc_nxv2i64_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v10, v8, 0 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -243,11 +243,11 @@ define @vtrunc_nxv4i64_nxv4i8( %va) { ; CHECK-LABEL: vtrunc_nxv4i64_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vnsrl.wi v12, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v12, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret %tvec = trunc %va to @@ -257,9 +257,9 @@ define @vtrunc_nxv4i64_nxv4i16( %va) { ; CHECK-LABEL: vtrunc_nxv4i64_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vnsrl.wi v12, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v12, 0 ; CHECK-NEXT: ret %tvec = trunc %va to @@ -269,7 +269,7 @@ define @vtrunc_nxv4i64_nxv4i32( %va) { ; CHECK-LABEL: vtrunc_nxv4i64_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; 
CHECK-NEXT: vnsrl.wi v12, v8, 0 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -280,11 +280,11 @@ define @vtrunc_nxv8i64_nxv8i8( %va) { ; CHECK-LABEL: vtrunc_nxv8i64_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vnsrl.wi v16, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-NEXT: vnsrl.wi v10, v16, 0 -; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v10, 0 ; CHECK-NEXT: ret %tvec = trunc %va to @@ -294,9 +294,9 @@ define @vtrunc_nxv8i64_nxv8i16( %va) { ; CHECK-LABEL: vtrunc_nxv8i64_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vnsrl.wi v16, v8, 0 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v16, 0 ; CHECK-NEXT: ret %tvec = trunc %va to @@ -306,7 +306,7 @@ define @vtrunc_nxv8i64_nxv8i32( %va) { ; CHECK-LABEL: vtrunc_nxv8i64_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vnsrl.wi v16, v8, 0 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp-mask.ll --- a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp-mask.ll @@ -20,7 +20,7 @@ define @vuitofp_nxv2f16_nxv2i1_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f16_nxv2i1_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 @@ -47,7 +47,7 @@ define @vuitofp_nxv2f32_nxv2i1_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f32_nxv2i1_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 @@ -74,7 +74,7 @@ define @vuitofp_nxv2f64_nxv2i1_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f64_nxv2i1_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 diff --git a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll @@ -8,7 +8,7 @@ ; CHECK-LABEL: vuitofp_nxv2f16_nxv2i7: ; CHECK: # %bb.0: ; CHECK-NEXT: li a1, 127 -; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, ma ; CHECK-NEXT: vand.vx v9, v8, a1 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu ; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t @@ -33,7 +33,7 @@ define @vuitofp_nxv2f16_nxv2i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f16_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -56,7 +56,7 @@ define @vuitofp_nxv2f16_nxv2i16_unmasked( %va, i32 zeroext %evl) { ; 
CHECK-LABEL: vuitofp_nxv2f16_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.uitofp.nxv2f16.nxv2i16( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) @@ -79,7 +79,7 @@ define @vuitofp_nxv2f16_nxv2i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f16_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.xu.w v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -104,9 +104,9 @@ define @vuitofp_nxv2f16_nxv2i64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f16_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.f.xu.w v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; CHECK-NEXT: vfncvt.f.f.w v8, v10 ; CHECK-NEXT: ret %v = call @llvm.vp.uitofp.nxv2f16.nxv2i64( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) @@ -129,7 +129,7 @@ define @vuitofp_nxv2f32_nxv2i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f32_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vzext.vf2 v9, v8 ; CHECK-NEXT: vfwcvt.f.xu.v v8, v9 ; CHECK-NEXT: ret @@ -153,7 +153,7 @@ define @vuitofp_nxv2f32_nxv2i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f32_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -176,7 +176,7 @@ define @vuitofp_nxv2f32_nxv2i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f32_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.uitofp.nxv2f32.nxv2i32( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) @@ -199,7 +199,7 @@ define @vuitofp_nxv2f32_nxv2i64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f32_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfncvt.f.xu.w v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -223,7 +223,7 @@ define @vuitofp_nxv2f64_nxv2i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f64_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vzext.vf4 v10, v8 ; CHECK-NEXT: vfwcvt.f.xu.v v8, v10 ; CHECK-NEXT: ret @@ -247,7 +247,7 @@ define @vuitofp_nxv2f64_nxv2i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f64_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vzext.vf2 v10, v8 ; CHECK-NEXT: vfwcvt.f.xu.v v8, v10 ; CHECK-NEXT: ret @@ -271,7 +271,7 @@ define @vuitofp_nxv2f64_nxv2i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f64_nxv2i32_unmasked: ; CHECK: # %bb.0: 
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vfwcvt.f.xu.v v10, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -294,7 +294,7 @@ define @vuitofp_nxv2f64_nxv2i64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vuitofp_nxv2f64_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: ret %v = call @llvm.vp.uitofp.nxv2f64.nxv2i64( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) @@ -317,7 +317,7 @@ ; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a4, a1, 2 -; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma ; CHECK-NEXT: slli a1, a1, 1 ; CHECK-NEXT: sub a3, a0, a1 ; CHECK-NEXT: vslidedown.vx v0, v0, a4 @@ -354,7 +354,7 @@ ; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a4, a1, 2 -; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma ; CHECK-NEXT: slli a1, a1, 1 ; CHECK-NEXT: sub a3, a0, a1 ; CHECK-NEXT: vslidedown.vx v0, v0, a4 @@ -387,14 +387,14 @@ ; CHECK-NEXT: mv a2, a1 ; CHECK-NEXT: .LBB27_2: ; CHECK-NEXT: li a3, 0 -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; CHECK-NEXT: sub a1, a0, a1 ; CHECK-NEXT: vfcvt.f.xu.v v8, v8 ; CHECK-NEXT: bltu a0, a1, .LBB27_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: mv a3, a1 ; CHECK-NEXT: .LBB27_4: -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; CHECK-NEXT: vfcvt.f.xu.v v16, v16 ; CHECK-NEXT: ret %v = call @llvm.vp.uitofp.nxv32f32.nxv32i32( %va, shufflevector ( insertelement ( undef, i1 true, i32 0), undef, zeroinitializer), i32 %evl) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vwadd.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -58,7 +58,7 @@ define @intrinsic_vwadd_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vwadd.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -106,7 +106,7 @@ define @intrinsic_vwadd_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vwadd.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -154,7 +154,7 @@ define @intrinsic_vwadd_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vwadd.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -202,7 +202,7 @@ define 
@intrinsic_vwadd_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vwadd.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -250,7 +250,7 @@ define @intrinsic_vwadd_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwadd.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -298,7 +298,7 @@ define @intrinsic_vwadd_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vwadd.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -346,7 +346,7 @@ define @intrinsic_vwadd_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vwadd.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -394,7 +394,7 @@ define @intrinsic_vwadd_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vwadd.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -442,7 +442,7 @@ define @intrinsic_vwadd_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vwadd.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -490,7 +490,7 @@ define @intrinsic_vwadd_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwadd.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -538,7 +538,7 @@ define @intrinsic_vwadd_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vwadd.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -586,7 +586,7 @@ define @intrinsic_vwadd_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vwadd.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -634,7 +634,7 @@ define @intrinsic_vwadd_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, 
m2, ta, ma ; CHECK-NEXT: vwadd.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -682,7 +682,7 @@ define @intrinsic_vwadd_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vwadd.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -730,7 +730,7 @@ define @intrinsic_vwadd_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vwadd.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -778,7 +778,7 @@ define @intrinsic_vwadd_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vwadd.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -826,7 +826,7 @@ define @intrinsic_vwadd_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vwadd.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -874,7 +874,7 @@ define @intrinsic_vwadd_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vwadd.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -922,7 +922,7 @@ define @intrinsic_vwadd_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vwadd.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -970,7 +970,7 @@ define @intrinsic_vwadd_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vwadd.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1018,7 +1018,7 @@ define @intrinsic_vwadd_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vwadd.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1066,7 +1066,7 @@ define @intrinsic_vwadd_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vwadd.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1114,7 +1114,7 @@ define @intrinsic_vwadd_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, 
ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vwadd.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1162,7 +1162,7 @@ define @intrinsic_vwadd_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vwadd.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1210,7 +1210,7 @@ define @intrinsic_vwadd_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vwadd.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1258,7 +1258,7 @@ define @intrinsic_vwadd_vx_nxv1i64_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vwadd.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1306,7 +1306,7 @@ define @intrinsic_vwadd_vx_nxv2i64_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vwadd.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1354,7 +1354,7 @@ define @intrinsic_vwadd_vx_nxv4i64_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vwadd.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1402,7 +1402,7 @@ define @intrinsic_vwadd_vx_nxv8i64_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vwadd.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vwadd.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -58,7 +58,7 @@ define @intrinsic_vwadd_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vwadd.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -106,7 +106,7 @@ define @intrinsic_vwadd_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vwadd.vv v10, v8, 
v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -154,7 +154,7 @@ define @intrinsic_vwadd_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vwadd.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -202,7 +202,7 @@ define @intrinsic_vwadd_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vwadd.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -250,7 +250,7 @@ define @intrinsic_vwadd_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwadd.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -298,7 +298,7 @@ define @intrinsic_vwadd_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vwadd.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -346,7 +346,7 @@ define @intrinsic_vwadd_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vwadd.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -394,7 +394,7 @@ define @intrinsic_vwadd_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vwadd.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -442,7 +442,7 @@ define @intrinsic_vwadd_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vwadd.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -490,7 +490,7 @@ define @intrinsic_vwadd_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwadd.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -538,7 +538,7 @@ define @intrinsic_vwadd_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vwadd.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -586,7 +586,7 @@ define @intrinsic_vwadd_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vwadd.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -634,7 +634,7 @@ define @intrinsic_vwadd_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vwadd.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -682,7 +682,7 @@ define @intrinsic_vwadd_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vwadd.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -730,7 +730,7 @@ define @intrinsic_vwadd_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vwadd.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -778,7 +778,7 @@ define @intrinsic_vwadd_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vwadd.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -826,7 +826,7 @@ define @intrinsic_vwadd_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vwadd.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -874,7 +874,7 @@ define @intrinsic_vwadd_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vwadd.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -922,7 +922,7 @@ define @intrinsic_vwadd_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vwadd.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -970,7 +970,7 @@ define @intrinsic_vwadd_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vwadd.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1018,7 +1018,7 @@ define @intrinsic_vwadd_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vwadd.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1066,7 +1066,7 @@ define @intrinsic_vwadd_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vwadd_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vwadd.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1114,7 +1114,7 @@ define @intrinsic_vwadd_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vwadd.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1162,7 +1162,7 @@ define @intrinsic_vwadd_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vwadd.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1210,7 +1210,7 @@ define @intrinsic_vwadd_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vwadd.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1258,7 +1258,7 @@ define @intrinsic_vwadd_vx_nxv1i64_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vwadd.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1306,7 +1306,7 @@ define @intrinsic_vwadd_vx_nxv2i64_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vwadd.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1354,7 +1354,7 @@ define @intrinsic_vwadd_vx_nxv4i64_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vwadd.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1402,7 +1402,7 @@ define @intrinsic_vwadd_vx_nxv8i64_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vwadd.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll @@ -5,7 +5,7 @@ define @vwadd_vv_nxv1i64( %va, %vb) { ; CHECK-LABEL: vwadd_vv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vwadd.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -18,7 +18,7 @@ define @vwaddu_vv_nxv1i64( %va, %vb) { ; CHECK-LABEL: vwaddu_vv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vwaddu.vv v10, v8, v9 ; CHECK-NEXT: 
vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -31,7 +31,7 @@ define @vwadd_vx_nxv1i64( %va, i32 %b) { ; CHECK-LABEL: vwadd_vx_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vwadd.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -46,7 +46,7 @@ define @vwaddu_vx_nxv1i64( %va, i32 %b) { ; CHECK-LABEL: vwaddu_vx_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vwaddu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -61,7 +61,7 @@ define @vwadd_wv_nxv1i64( %va, %vb) { ; CHECK-LABEL: vwadd_wv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vwadd.wv v8, v8, v9 ; CHECK-NEXT: ret %vc = sext %vb to @@ -72,7 +72,7 @@ define @vwaddu_wv_nxv1i64( %va, %vb) { ; CHECK-LABEL: vwaddu_wv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vwaddu.wv v8, v8, v9 ; CHECK-NEXT: ret %vc = zext %vb to @@ -83,7 +83,7 @@ define @vwadd_wx_nxv1i64( %va, i32 %b) { ; CHECK-LABEL: vwadd_wx_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vwadd.wx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -96,7 +96,7 @@ define @vwaddu_wx_nxv1i64( %va, i32 %b) { ; CHECK-LABEL: vwaddu_wx_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vwaddu.wx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -109,7 +109,7 @@ define @vwadd_vv_nxv2i64( %va, %vb) { ; CHECK-LABEL: vwadd_vv_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vwadd.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -122,7 +122,7 @@ define @vwaddu_vv_nxv2i64( %va, %vb) { ; CHECK-LABEL: vwaddu_vv_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vwaddu.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -135,7 +135,7 @@ define @vwadd_vx_nxv2i64( %va, i32 %b) { ; CHECK-LABEL: vwadd_vx_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vwadd.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -150,7 +150,7 @@ define @vwaddu_vx_nxv2i64( %va, i32 %b) { ; CHECK-LABEL: vwaddu_vx_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vwaddu.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -165,7 +165,7 @@ define @vwadd_wv_nxv2i64( %va, %vb) { ; CHECK-LABEL: vwadd_wv_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vwadd.wv v8, v8, v10 ; CHECK-NEXT: ret %vc = sext %vb to @@ -176,7 +176,7 @@ define @vwaddu_wv_nxv2i64( %va, %vb) { ; CHECK-LABEL: vwaddu_wv_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vwaddu.wv v8, v8, v10 ; CHECK-NEXT: ret %vc = zext %vb to @@ -187,7 +187,7 @@ define 
@vwadd_wx_nxv2i64( %va, i32 %b) { ; CHECK-LABEL: vwadd_wx_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vwadd.wx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -200,7 +200,7 @@ define @vwaddu_wx_nxv2i64( %va, i32 %b) { ; CHECK-LABEL: vwaddu_wx_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vwaddu.wx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -213,7 +213,7 @@ define @vwadd_vv_nxv4i64( %va, %vb) { ; CHECK-LABEL: vwadd_vv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vwadd.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -226,7 +226,7 @@ define @vwaddu_vv_nxv4i64( %va, %vb) { ; CHECK-LABEL: vwaddu_vv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vwaddu.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -239,7 +239,7 @@ define @vwadd_vx_nxv4i64( %va, i32 %b) { ; CHECK-LABEL: vwadd_vx_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vwadd.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -254,7 +254,7 @@ define @vwaddu_vx_nxv4i64( %va, i32 %b) { ; CHECK-LABEL: vwaddu_vx_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vwaddu.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -269,7 +269,7 @@ define @vwadd_wv_nxv4i64( %va, %vb) { ; CHECK-LABEL: vwadd_wv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vwadd.wv v8, v8, v12 ; CHECK-NEXT: ret %vc = sext %vb to @@ -280,7 +280,7 @@ define @vwaddu_wv_nxv4i64( %va, %vb) { ; CHECK-LABEL: vwaddu_wv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vwaddu.wv v8, v8, v12 ; CHECK-NEXT: ret %vc = zext %vb to @@ -291,7 +291,7 @@ define @vwadd_wx_nxv4i64( %va, i32 %b) { ; CHECK-LABEL: vwadd_wx_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vwadd.wx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -304,7 +304,7 @@ define @vwaddu_wx_nxv4i64( %va, i32 %b) { ; CHECK-LABEL: vwaddu_wx_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vwaddu.wx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -317,7 +317,7 @@ define @vwadd_vv_nxv8i64( %va, %vb) { ; CHECK-LABEL: vwadd_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vwadd.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -330,7 +330,7 @@ define @vwaddu_vv_nxv8i64( %va, %vb) { ; CHECK-LABEL: vwaddu_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vwaddu.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -343,7 +343,7 @@ define 
@vwadd_vx_nxv8i64( %va, i32 %b) { ; CHECK-LABEL: vwadd_vx_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vwadd.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -358,7 +358,7 @@ define @vwaddu_vx_nxv8i64( %va, i32 %b) { ; CHECK-LABEL: vwaddu_vx_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vwaddu.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -373,7 +373,7 @@ define @vwadd_wv_nxv8i64( %va, %vb) { ; CHECK-LABEL: vwadd_wv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vwadd.wv v8, v8, v16 ; CHECK-NEXT: ret %vc = sext %vb to @@ -384,7 +384,7 @@ define @vwaddu_wv_nxv8i64( %va, %vb) { ; CHECK-LABEL: vwaddu_wv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vwaddu.wv v8, v8, v16 ; CHECK-NEXT: ret %vc = zext %vb to @@ -395,7 +395,7 @@ define @vwadd_wx_nxv8i64( %va, i32 %b) { ; CHECK-LABEL: vwadd_wx_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vwadd.wx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -408,7 +408,7 @@ define @vwaddu_wx_nxv8i64( %va, i32 %b) { ; CHECK-LABEL: vwaddu_wx_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vwaddu.wx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vwadd.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vwadd.w_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vwadd.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vwadd.w_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vwadd.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vwadd.w_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vwadd.wv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vwadd.w_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; 
CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vwadd.wv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vwadd.w_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwadd.wv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -293,7 +293,7 @@ define @intrinsic_vwadd.w_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vwadd.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vwadd.w_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vwadd.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vwadd.w_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vwadd.wv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vwadd.w_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vwadd.wv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vwadd.w_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwadd.wv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -529,7 +529,7 @@ define @intrinsic_vwadd.w_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vwadd.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -576,7 +576,7 @@ define @intrinsic_vwadd.w_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vwadd.wv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vwadd.w_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vwadd.wv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vwadd.w_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vwadd.wv v8, 
v8, v16 ; CHECK-NEXT: ret entry: @@ -718,7 +718,7 @@ define @intrinsic_vwadd.w_wx_nxv1i16_nxv1i16_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vwadd.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -765,7 +765,7 @@ define @intrinsic_vwadd.w_wx_nxv2i16_nxv2i16_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vwadd.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -812,7 +812,7 @@ define @intrinsic_vwadd.w_wx_nxv4i16_nxv4i16_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vwadd.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vwadd.w_wx_nxv8i16_nxv8i16_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vwadd.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vwadd.w_wx_nxv16i16_nxv16i16_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vwadd.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vwadd.w_wx_nxv32i16_nxv32i16_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vwadd.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vwadd.w_wx_nxv1i32_nxv1i32_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vwadd.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1047,7 +1047,7 @@ define @intrinsic_vwadd.w_wx_nxv2i32_nxv2i32_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vwadd.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1094,7 +1094,7 @@ define @intrinsic_vwadd.w_wx_nxv4i32_nxv4i32_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vwadd.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1141,7 +1141,7 @@ define @intrinsic_vwadd.w_wx_nxv8i32_nxv8i32_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vwadd.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1188,7 +1188,7 @@ define @intrinsic_vwadd.w_wx_nxv16i32_nxv16i32_i16( %0, i16 %1, i32 %2) nounwind { 
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vwadd.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1235,7 +1235,7 @@ define @intrinsic_vwadd.w_wx_nxv1i64_nxv1i64_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vwadd.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1282,7 +1282,7 @@ define @intrinsic_vwadd.w_wx_nxv2i64_nxv2i64_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vwadd.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1329,7 +1329,7 @@ define @intrinsic_vwadd.w_wx_nxv4i64_nxv4i64_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vwadd.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1376,7 +1376,7 @@ define @intrinsic_vwadd.w_wx_nxv8i64_nxv8i64_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vwadd.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1927,7 +1927,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv1i16_nxv1i16_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vwadd.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1944,7 +1944,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv2i16_nxv2i16_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vwadd.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1961,7 +1961,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv4i16_nxv4i16_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vwadd.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1978,7 +1978,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv8i16_nxv8i16_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vwadd.wv v12, v10, v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -1995,7 +1995,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv16i16_nxv16i16_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vwadd.wv v16, v12, v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -2012,7 +2012,7 
@@ define @intrinsic_vwadd.w_wv_untie_nxv32i16_nxv32i16_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwadd.wv v24, v16, v8 ; CHECK-NEXT: vmv8r.v v8, v24 ; CHECK-NEXT: ret @@ -2029,7 +2029,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv1i32_nxv1i32_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vwadd.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2046,7 +2046,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv2i32_nxv2i32_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vwadd.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2063,7 +2063,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv4i32_nxv4i32_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vwadd.wv v12, v10, v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -2080,7 +2080,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv8i32_nxv8i32_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vwadd.wv v16, v12, v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -2097,7 +2097,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv1i64_nxv1i64_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vwadd.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2114,7 +2114,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv2i64_nxv2i64_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vwadd.wv v12, v10, v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -2131,7 +2131,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv4i64_nxv4i64_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vwadd.wv v16, v12, v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -2148,7 +2148,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv8i64_nxv8i64_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vwadd.wv v24, v16, v8 ; CHECK-NEXT: vmv8r.v v8, v24 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll 
--- a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vwadd.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vwadd.w_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vwadd.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vwadd.w_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vwadd.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vwadd.w_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vwadd.wv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vwadd.w_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vwadd.wv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vwadd.w_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwadd.wv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -293,7 +293,7 @@ define @intrinsic_vwadd.w_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vwadd.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vwadd.w_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vwadd.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vwadd.w_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vwadd.wv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vwadd.w_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vwadd.wv v8, v8, v12 ; CHECK-NEXT: ret 
entry: @@ -481,7 +481,7 @@ define @intrinsic_vwadd.w_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwadd.wv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -529,7 +529,7 @@ define @intrinsic_vwadd.w_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vwadd.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -576,7 +576,7 @@ define @intrinsic_vwadd.w_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vwadd.wv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vwadd.w_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vwadd.wv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vwadd.w_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vwadd.wv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -718,7 +718,7 @@ define @intrinsic_vwadd.w_wx_nxv1i16_nxv1i16_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vwadd.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -765,7 +765,7 @@ define @intrinsic_vwadd.w_wx_nxv2i16_nxv2i16_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vwadd.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -812,7 +812,7 @@ define @intrinsic_vwadd.w_wx_nxv4i16_nxv4i16_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vwadd.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vwadd.w_wx_nxv8i16_nxv8i16_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vwadd.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vwadd.w_wx_nxv16i16_nxv16i16_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vwadd.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vwadd.w_wx_nxv32i16_nxv32i16_i8( %0, i8 %1, i64 %2) nounwind { ; 
CHECK-LABEL: intrinsic_vwadd.w_wx_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vwadd.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vwadd.w_wx_nxv1i32_nxv1i32_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vwadd.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1047,7 +1047,7 @@ define @intrinsic_vwadd.w_wx_nxv2i32_nxv2i32_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vwadd.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1094,7 +1094,7 @@ define @intrinsic_vwadd.w_wx_nxv4i32_nxv4i32_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vwadd.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1141,7 +1141,7 @@ define @intrinsic_vwadd.w_wx_nxv8i32_nxv8i32_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vwadd.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1188,7 +1188,7 @@ define @intrinsic_vwadd.w_wx_nxv16i32_nxv16i32_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vwadd.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1235,7 +1235,7 @@ define @intrinsic_vwadd.w_wx_nxv1i64_nxv1i64_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vwadd.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1282,7 +1282,7 @@ define @intrinsic_vwadd.w_wx_nxv2i64_nxv2i64_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vwadd.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1329,7 +1329,7 @@ define @intrinsic_vwadd.w_wx_nxv4i64_nxv4i64_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vwadd.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1376,7 +1376,7 @@ define @intrinsic_vwadd.w_wx_nxv8i64_nxv8i64_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vwadd.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1927,7 +1927,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv1i16_nxv1i16_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vwadd.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1944,7 +1944,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv2i16_nxv2i16_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vwadd.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1961,7 +1961,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv4i16_nxv4i16_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vwadd.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1978,7 +1978,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv8i16_nxv8i16_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vwadd.wv v12, v10, v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -1995,7 +1995,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv16i16_nxv16i16_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vwadd.wv v16, v12, v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -2012,7 +2012,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv32i16_nxv32i16_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwadd.wv v24, v16, v8 ; CHECK-NEXT: vmv8r.v v8, v24 ; CHECK-NEXT: ret @@ -2029,7 +2029,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv1i32_nxv1i32_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vwadd.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2046,7 +2046,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv2i32_nxv2i32_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vwadd.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2063,7 +2063,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv4i32_nxv4i32_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vwadd.wv v12, v10, v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -2080,7 +2080,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv8i32_nxv8i32_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma 
; CHECK-NEXT: vwadd.wv v16, v12, v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -2097,7 +2097,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv1i64_nxv1i64_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vwadd.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2114,7 +2114,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv2i64_nxv2i64_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vwadd.wv v12, v10, v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -2131,7 +2131,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv4i64_nxv4i64_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vwadd.wv v16, v12, v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -2148,7 +2148,7 @@ define @intrinsic_vwadd.w_wv_untie_nxv8i64_nxv8i64_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vwadd.wv v24, v16, v8 ; CHECK-NEXT: vmv8r.v v8, v24 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vwaddu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -58,7 +58,7 @@ define @intrinsic_vwaddu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vwaddu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -106,7 +106,7 @@ define @intrinsic_vwaddu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vwaddu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -154,7 +154,7 @@ define @intrinsic_vwaddu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vwaddu.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -202,7 +202,7 @@ define @intrinsic_vwaddu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, 
m2, ta, ma ; CHECK-NEXT: vwaddu.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -250,7 +250,7 @@ define @intrinsic_vwaddu_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwaddu.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -298,7 +298,7 @@ define @intrinsic_vwaddu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vwaddu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -346,7 +346,7 @@ define @intrinsic_vwaddu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vwaddu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -394,7 +394,7 @@ define @intrinsic_vwaddu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vwaddu.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -442,7 +442,7 @@ define @intrinsic_vwaddu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vwaddu.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -490,7 +490,7 @@ define @intrinsic_vwaddu_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwaddu.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -538,7 +538,7 @@ define @intrinsic_vwaddu_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vwaddu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -586,7 +586,7 @@ define @intrinsic_vwaddu_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vwaddu.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -634,7 +634,7 @@ define @intrinsic_vwaddu_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vwaddu.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -682,7 +682,7 @@ define @intrinsic_vwaddu_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vwaddu_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vwaddu.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -730,7 +730,7 @@ define @intrinsic_vwaddu_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vwaddu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -778,7 +778,7 @@ define @intrinsic_vwaddu_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vwaddu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -826,7 +826,7 @@ define @intrinsic_vwaddu_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vwaddu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -874,7 +874,7 @@ define @intrinsic_vwaddu_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vwaddu.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -922,7 +922,7 @@ define @intrinsic_vwaddu_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vwaddu.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -970,7 +970,7 @@ define @intrinsic_vwaddu_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vwaddu.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1018,7 +1018,7 @@ define @intrinsic_vwaddu_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vwaddu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1066,7 +1066,7 @@ define @intrinsic_vwaddu_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vwaddu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1114,7 +1114,7 @@ define @intrinsic_vwaddu_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vwaddu.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1162,7 +1162,7 @@ define 
@intrinsic_vwaddu_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vwaddu.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1210,7 +1210,7 @@ define @intrinsic_vwaddu_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vwaddu.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1258,7 +1258,7 @@ define @intrinsic_vwaddu_vx_nxv1i64_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vwaddu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1306,7 +1306,7 @@ define @intrinsic_vwaddu_vx_nxv2i64_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vwaddu.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1354,7 +1354,7 @@ define @intrinsic_vwaddu_vx_nxv4i64_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vwaddu.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1402,7 +1402,7 @@ define @intrinsic_vwaddu_vx_nxv8i64_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vwaddu.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vwaddu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -58,7 +58,7 @@ define @intrinsic_vwaddu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vwaddu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -106,7 +106,7 @@ define @intrinsic_vwaddu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vwaddu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -154,7 +154,7 @@ define @intrinsic_vwaddu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, i64 %2) 
nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vwaddu.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -202,7 +202,7 @@ define @intrinsic_vwaddu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vwaddu.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -250,7 +250,7 @@ define @intrinsic_vwaddu_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwaddu.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -298,7 +298,7 @@ define @intrinsic_vwaddu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vwaddu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -346,7 +346,7 @@ define @intrinsic_vwaddu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vwaddu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -394,7 +394,7 @@ define @intrinsic_vwaddu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vwaddu.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -442,7 +442,7 @@ define @intrinsic_vwaddu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vwaddu.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -490,7 +490,7 @@ define @intrinsic_vwaddu_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwaddu.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -538,7 +538,7 @@ define @intrinsic_vwaddu_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vwaddu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -586,7 +586,7 @@ define @intrinsic_vwaddu_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vwaddu.vv 
v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -634,7 +634,7 @@ define @intrinsic_vwaddu_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vwaddu.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -682,7 +682,7 @@ define @intrinsic_vwaddu_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vwaddu.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -730,7 +730,7 @@ define @intrinsic_vwaddu_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vwaddu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -778,7 +778,7 @@ define @intrinsic_vwaddu_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vwaddu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -826,7 +826,7 @@ define @intrinsic_vwaddu_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vwaddu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -874,7 +874,7 @@ define @intrinsic_vwaddu_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vwaddu.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -922,7 +922,7 @@ define @intrinsic_vwaddu_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vwaddu.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -970,7 +970,7 @@ define @intrinsic_vwaddu_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vwaddu.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1018,7 +1018,7 @@ define @intrinsic_vwaddu_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vwaddu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1066,7 +1066,7 @@ define @intrinsic_vwaddu_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, 
mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vwaddu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1114,7 +1114,7 @@ define @intrinsic_vwaddu_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vwaddu.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1162,7 +1162,7 @@ define @intrinsic_vwaddu_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vwaddu.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1210,7 +1210,7 @@ define @intrinsic_vwaddu_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vwaddu.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1258,7 +1258,7 @@ define @intrinsic_vwaddu_vx_nxv1i64_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vwaddu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1306,7 +1306,7 @@ define @intrinsic_vwaddu_vx_nxv2i64_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vwaddu.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1354,7 +1354,7 @@ define @intrinsic_vwaddu_vx_nxv4i64_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vwaddu.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1402,7 +1402,7 @@ define @intrinsic_vwaddu_vx_nxv8i64_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vwaddu.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vwaddu.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vwaddu.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vwaddu.w_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; 
CHECK-NEXT: vwaddu.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vwaddu.w_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vwaddu.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vwaddu.w_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vwaddu.wv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vwaddu.w_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vwaddu.wv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vwaddu.w_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwaddu.wv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -293,7 +293,7 @@ define @intrinsic_vwaddu.w_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vwaddu.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vwaddu.w_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vwaddu.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vwaddu.w_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vwaddu.wv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vwaddu.w_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vwaddu.wv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vwaddu.w_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwaddu.wv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -529,7 +529,7 @@ define @intrinsic_vwaddu.w_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vwaddu.wv v8, v8, v9 ; CHECK-NEXT: 
ret entry: @@ -576,7 +576,7 @@ define @intrinsic_vwaddu.w_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vwaddu.wv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vwaddu.w_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vwaddu.wv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vwaddu.w_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vwaddu.wv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -718,7 +718,7 @@ define @intrinsic_vwaddu.w_wx_nxv1i16_nxv1i16_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vwaddu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -765,7 +765,7 @@ define @intrinsic_vwaddu.w_wx_nxv2i16_nxv2i16_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vwaddu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -812,7 +812,7 @@ define @intrinsic_vwaddu.w_wx_nxv4i16_nxv4i16_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vwaddu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vwaddu.w_wx_nxv8i16_nxv8i16_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vwaddu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vwaddu.w_wx_nxv16i16_nxv16i16_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vwaddu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vwaddu.w_wx_nxv32i16_nxv32i16_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vwaddu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vwaddu.w_wx_nxv1i32_nxv1i32_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vwaddu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1047,7 +1047,7 @@ define @intrinsic_vwaddu.w_wx_nxv2i32_nxv2i32_i16( %0, i16 %1, i32 
%2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vwaddu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1094,7 +1094,7 @@ define @intrinsic_vwaddu.w_wx_nxv4i32_nxv4i32_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vwaddu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1141,7 +1141,7 @@ define @intrinsic_vwaddu.w_wx_nxv8i32_nxv8i32_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vwaddu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1188,7 +1188,7 @@ define @intrinsic_vwaddu.w_wx_nxv16i32_nxv16i32_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vwaddu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1235,7 +1235,7 @@ define @intrinsic_vwaddu.w_wx_nxv1i64_nxv1i64_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vwaddu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1282,7 +1282,7 @@ define @intrinsic_vwaddu.w_wx_nxv2i64_nxv2i64_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vwaddu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1329,7 +1329,7 @@ define @intrinsic_vwaddu.w_wx_nxv4i64_nxv4i64_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vwaddu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1376,7 +1376,7 @@ define @intrinsic_vwaddu.w_wx_nxv8i64_nxv8i64_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vwaddu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1927,7 +1927,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv1i16_nxv1i16_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vwaddu.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1944,7 +1944,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv2i16_nxv2i16_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vwaddu.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1961,7 +1961,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv4i16_nxv4i16_nxv4i8( %0, %1, 
i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vwaddu.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1978,7 +1978,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv8i16_nxv8i16_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vwaddu.wv v12, v10, v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -1995,7 +1995,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv16i16_nxv16i16_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vwaddu.wv v16, v12, v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -2012,7 +2012,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv32i16_nxv32i16_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwaddu.wv v24, v16, v8 ; CHECK-NEXT: vmv8r.v v8, v24 ; CHECK-NEXT: ret @@ -2029,7 +2029,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv1i32_nxv1i32_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vwaddu.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2046,7 +2046,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv2i32_nxv2i32_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vwaddu.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2063,7 +2063,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv4i32_nxv4i32_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vwaddu.wv v12, v10, v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -2080,7 +2080,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv8i32_nxv8i32_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vwaddu.wv v16, v12, v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -2097,7 +2097,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv1i64_nxv1i64_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vwaddu.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2114,7 +2114,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv2i64_nxv2i64_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vwaddu.w_wv_untie_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vwaddu.wv v12, v10, v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -2131,7 +2131,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv4i64_nxv4i64_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vwaddu.wv v16, v12, v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -2148,7 +2148,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv8i64_nxv8i64_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vwaddu.wv v24, v16, v8 ; CHECK-NEXT: vmv8r.v v8, v24 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vwaddu.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vwaddu.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vwaddu.w_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vwaddu.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vwaddu.w_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vwaddu.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vwaddu.w_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vwaddu.wv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vwaddu.w_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vwaddu.wv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vwaddu.w_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwaddu.wv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -293,7 +293,7 @@ define @intrinsic_vwaddu.w_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vwaddu.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vwaddu.w_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vwaddu.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vwaddu.w_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vwaddu.wv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vwaddu.w_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vwaddu.wv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vwaddu.w_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwaddu.wv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -529,7 +529,7 @@ define @intrinsic_vwaddu.w_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vwaddu.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -576,7 +576,7 @@ define @intrinsic_vwaddu.w_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vwaddu.wv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vwaddu.w_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vwaddu.wv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vwaddu.w_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vwaddu.wv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -718,7 +718,7 @@ define @intrinsic_vwaddu.w_wx_nxv1i16_nxv1i16_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vwaddu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -765,7 +765,7 @@ define @intrinsic_vwaddu.w_wx_nxv2i16_nxv2i16_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, 
mf4, ta, ma ; CHECK-NEXT: vwaddu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -812,7 +812,7 @@ define @intrinsic_vwaddu.w_wx_nxv4i16_nxv4i16_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vwaddu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vwaddu.w_wx_nxv8i16_nxv8i16_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vwaddu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vwaddu.w_wx_nxv16i16_nxv16i16_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vwaddu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vwaddu.w_wx_nxv32i16_nxv32i16_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vwaddu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vwaddu.w_wx_nxv1i32_nxv1i32_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vwaddu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1047,7 +1047,7 @@ define @intrinsic_vwaddu.w_wx_nxv2i32_nxv2i32_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vwaddu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1094,7 +1094,7 @@ define @intrinsic_vwaddu.w_wx_nxv4i32_nxv4i32_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vwaddu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1141,7 +1141,7 @@ define @intrinsic_vwaddu.w_wx_nxv8i32_nxv8i32_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vwaddu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1188,7 +1188,7 @@ define @intrinsic_vwaddu.w_wx_nxv16i32_nxv16i32_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vwaddu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1235,7 +1235,7 @@ define @intrinsic_vwaddu.w_wx_nxv1i64_nxv1i64_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vwaddu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1282,7 +1282,7 
@@ define @intrinsic_vwaddu.w_wx_nxv2i64_nxv2i64_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vwaddu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1329,7 +1329,7 @@ define @intrinsic_vwaddu.w_wx_nxv4i64_nxv4i64_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vwaddu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1376,7 +1376,7 @@ define @intrinsic_vwaddu.w_wx_nxv8i64_nxv8i64_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vwaddu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1927,7 +1927,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv1i16_nxv1i16_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vwaddu.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1944,7 +1944,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv2i16_nxv2i16_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vwaddu.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1961,7 +1961,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv4i16_nxv4i16_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vwaddu.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1978,7 +1978,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv8i16_nxv8i16_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vwaddu.wv v12, v10, v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -1995,7 +1995,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv16i16_nxv16i16_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vwaddu.wv v16, v12, v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -2012,7 +2012,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv32i16_nxv32i16_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwaddu.wv v24, v16, v8 ; CHECK-NEXT: vmv8r.v v8, v24 ; CHECK-NEXT: ret @@ -2029,7 +2029,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv1i32_nxv1i32_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vwaddu.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2046,7 +2046,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv2i32_nxv2i32_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vwaddu.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2063,7 +2063,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv4i32_nxv4i32_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vwaddu.wv v12, v10, v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -2080,7 +2080,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv8i32_nxv8i32_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vwaddu.wv v16, v12, v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -2097,7 +2097,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv1i64_nxv1i64_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vwaddu.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2114,7 +2114,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv2i64_nxv2i64_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vwaddu.wv v12, v10, v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -2131,7 +2131,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv4i64_nxv4i64_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vwaddu.wv v16, v12, v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -2148,7 +2148,7 @@ define @intrinsic_vwaddu.w_wv_untie_nxv8i64_nxv8i64_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_wv_untie_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vwaddu.wv v24, v16, v8 ; CHECK-NEXT: vmv8r.v v8, v24 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll @@ -11,7 +11,7 @@ define @intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vwmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define @intrinsic_vwmacc_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, i32 %3) nounwind { ; 
CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vwmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -105,7 +105,7 @@ define @intrinsic_vwmacc_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vwmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -152,7 +152,7 @@ define @intrinsic_vwmacc_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vwmacc.vv v8, v10, v11 ; CHECK-NEXT: ret entry: @@ -199,7 +199,7 @@ define @intrinsic_vwmacc_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vwmacc.vv v8, v12, v14 ; CHECK-NEXT: ret entry: @@ -246,7 +246,7 @@ define @intrinsic_vwmacc_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vwmacc.vv v8, v16, v20 ; CHECK-NEXT: ret entry: @@ -293,7 +293,7 @@ define @intrinsic_vwmacc_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vwmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vwmacc_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vwmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vwmacc_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vwmacc.vv v8, v10, v11 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vwmacc_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vwmacc.vv v8, v12, v14 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vwmacc_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vwmacc.vv v8, v16, v20 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vwmacc_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vwmacc_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vwmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vwmacc_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vwmacc.vv v8, v10, v11 ; CHECK-NEXT: ret entry: @@ -622,7 +622,7 @@ define @intrinsic_vwmacc_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vwmacc.vv v8, v12, v14 ; CHECK-NEXT: ret entry: @@ -669,7 +669,7 @@ define @intrinsic_vwmacc_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vwmacc.vv v8, v16, v20 ; CHECK-NEXT: ret entry: @@ -716,7 +716,7 @@ define @intrinsic_vwmacc_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vwmacc.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -763,7 +763,7 @@ define @intrinsic_vwmacc_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma ; CHECK-NEXT: vwmacc.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -810,7 +810,7 @@ define @intrinsic_vwmacc_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma ; CHECK-NEXT: vwmacc.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -857,7 +857,7 @@ define @intrinsic_vwmacc_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma ; CHECK-NEXT: vwmacc.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -904,7 +904,7 @@ define @intrinsic_vwmacc_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma ; CHECK-NEXT: vwmacc.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -951,7 +951,7 @@ define @intrinsic_vwmacc_vx_nxv32i16_i8_nxv32i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv32i16_i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma ; CHECK-NEXT: vwmacc.vx v8, a0, v16 ; CHECK-NEXT: ret entry: @@ -998,7 +998,7 @@ define @intrinsic_vwmacc_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, 
e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; CHECK-NEXT: vwmacc.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1045,7 +1045,7 @@ define @intrinsic_vwmacc_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; CHECK-NEXT: vwmacc.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1092,7 +1092,7 @@ define @intrinsic_vwmacc_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; CHECK-NEXT: vwmacc.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -1139,7 +1139,7 @@ define @intrinsic_vwmacc_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma ; CHECK-NEXT: vwmacc.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -1186,7 +1186,7 @@ define @intrinsic_vwmacc_vx_nxv16i32_i16_nxv16i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv16i32_i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma ; CHECK-NEXT: vwmacc.vx v8, a0, v16 ; CHECK-NEXT: ret entry: @@ -1233,7 +1233,7 @@ define @intrinsic_vwmacc_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i64_i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; CHECK-NEXT: vwmacc.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1280,7 +1280,7 @@ define @intrinsic_vwmacc_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i64_i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; CHECK-NEXT: vwmacc.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -1327,7 +1327,7 @@ define @intrinsic_vwmacc_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv4i64_i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; CHECK-NEXT: vwmacc.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -1374,7 +1374,7 @@ define @intrinsic_vwmacc_vx_nxv8i64_i32_nxv8i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv8i64_i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma ; CHECK-NEXT: vwmacc.vx v8, a0, v16 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll @@ -11,7 +11,7 @@ define @intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vwmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define @intrinsic_vwmacc_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, i64 %3) 
nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vwmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -105,7 +105,7 @@ define @intrinsic_vwmacc_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vwmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -152,7 +152,7 @@ define @intrinsic_vwmacc_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vwmacc.vv v8, v10, v11 ; CHECK-NEXT: ret entry: @@ -199,7 +199,7 @@ define @intrinsic_vwmacc_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vwmacc.vv v8, v12, v14 ; CHECK-NEXT: ret entry: @@ -246,7 +246,7 @@ define @intrinsic_vwmacc_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vwmacc.vv v8, v16, v20 ; CHECK-NEXT: ret entry: @@ -293,7 +293,7 @@ define @intrinsic_vwmacc_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vwmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vwmacc_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vwmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vwmacc_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vwmacc.vv v8, v10, v11 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vwmacc_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vwmacc.vv v8, v12, v14 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vwmacc_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vwmacc.vv v8, v16, v20 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vwmacc_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vwmacc_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vwmacc.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vwmacc_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vwmacc.vv v8, v10, v11 ; CHECK-NEXT: ret entry: @@ -622,7 +622,7 @@ define @intrinsic_vwmacc_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vwmacc.vv v8, v12, v14 ; CHECK-NEXT: ret entry: @@ -669,7 +669,7 @@ define @intrinsic_vwmacc_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vwmacc.vv v8, v16, v20 ; CHECK-NEXT: ret entry: @@ -716,7 +716,7 @@ define @intrinsic_vwmacc_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vwmacc.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -763,7 +763,7 @@ define @intrinsic_vwmacc_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma ; CHECK-NEXT: vwmacc.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -810,7 +810,7 @@ define @intrinsic_vwmacc_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma ; CHECK-NEXT: vwmacc.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -857,7 +857,7 @@ define @intrinsic_vwmacc_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma ; CHECK-NEXT: vwmacc.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -904,7 +904,7 @@ define @intrinsic_vwmacc_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma ; CHECK-NEXT: vwmacc.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -951,7 +951,7 @@ define @intrinsic_vwmacc_vx_nxv32i16_i8_nxv32i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv32i16_i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma ; CHECK-NEXT: vwmacc.vx v8, a0, v16 ; CHECK-NEXT: ret entry: @@ -998,7 +998,7 @@ define @intrinsic_vwmacc_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, 
e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; CHECK-NEXT: vwmacc.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1045,7 +1045,7 @@ define @intrinsic_vwmacc_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; CHECK-NEXT: vwmacc.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1092,7 +1092,7 @@ define @intrinsic_vwmacc_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; CHECK-NEXT: vwmacc.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -1139,7 +1139,7 @@ define @intrinsic_vwmacc_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma ; CHECK-NEXT: vwmacc.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -1186,7 +1186,7 @@ define @intrinsic_vwmacc_vx_nxv16i32_i16_nxv16i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv16i32_i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma ; CHECK-NEXT: vwmacc.vx v8, a0, v16 ; CHECK-NEXT: ret entry: @@ -1233,7 +1233,7 @@ define @intrinsic_vwmacc_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i64_i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; CHECK-NEXT: vwmacc.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1280,7 +1280,7 @@ define @intrinsic_vwmacc_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i64_i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; CHECK-NEXT: vwmacc.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -1327,7 +1327,7 @@ define @intrinsic_vwmacc_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv4i64_i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; CHECK-NEXT: vwmacc.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -1374,7 +1374,7 @@ define @intrinsic_vwmacc_vx_nxv8i64_i32_nxv8i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmacc_vx_nxv8i64_i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma ; CHECK-NEXT: vwmacc.vx v8, a0, v16 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmacc-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vwmacc-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmacc-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmacc-sdnode.ll @@ -7,7 +7,7 @@ define @vwmacc_vv_nxv1i32( %va, %vb, %vc) { ; CHECK-LABEL: vwmacc_vv_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vwmacc.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -22,7 +22,7 @@ define @vwmacc_vx_nxv1i32( %va, i32 %b, %vc) { ; CHECK-LABEL: vwmacc_vx_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: 
vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vwmacc.vx v9, a0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -39,7 +39,7 @@ define @vwmaccu_vv_nxv1i32( %va, %vb, %vc) { ; CHECK-LABEL: vwmaccu_vv_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vwmaccu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -54,7 +54,7 @@ define @vwmaccu_vx_nxv1i32( %va, i32 %b, %vc) { ; CHECK-LABEL: vwmaccu_vx_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vwmaccu.vx v9, a0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -71,7 +71,7 @@ define @vwmaccsu_vv_nxv1i32( %va, %vb, %vc) { ; CHECK-LABEL: vwmaccsu_vv_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vwmaccsu.vv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -86,7 +86,7 @@ define @vwmaccsu_vx_nxv1i32( %va, i32 %b, %vc) { ; CHECK-LABEL: vwmaccsu_vx_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vwmaccsu.vx v9, a0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -103,7 +103,7 @@ define @vwmaccus_vx_nxv1i32( %va, i32 %b, %vc) { ; CHECK-LABEL: vwmaccus_vx_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vwmaccus.vx v9, a0, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -120,7 +120,7 @@ define @vwmacc_vv_nxv2i32( %va, %vb, %vc) { ; CHECK-LABEL: vwmacc_vv_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vwmacc.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -135,7 +135,7 @@ define @vwmacc_vx_nxv2i32( %va, i32 %b, %vc) { ; CHECK-LABEL: vwmacc_vx_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vwmacc.vx v10, a0, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -152,7 +152,7 @@ define @vwmaccu_vv_nxv2i32( %va, %vb, %vc) { ; CHECK-LABEL: vwmaccu_vv_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vwmaccu.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -167,7 +167,7 @@ define @vwmaccu_vx_nxv2i32( %va, i32 %b, %vc) { ; CHECK-LABEL: vwmaccu_vx_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vwmaccu.vx v10, a0, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -184,7 +184,7 @@ define @vwmaccsu_vv_nxv2i32( %va, %vb, %vc) { ; CHECK-LABEL: vwmaccsu_vv_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vwmaccsu.vv v10, v9, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -199,7 +199,7 @@ define @vwmaccsu_vx_nxv2i32( %va, i32 %b, %vc) { ; CHECK-LABEL: vwmaccsu_vx_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vwmaccsu.vx v10, a0, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -216,7 +216,7 @@ define @vwmaccus_vx_nxv2i32( %va, 
i32 %b, %vc) { ; CHECK-LABEL: vwmaccus_vx_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vwmaccus.vx v10, a0, v8 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -233,7 +233,7 @@ define @vwmacc_vv_nxv4i32( %va, %vb, %vc) { ; CHECK-LABEL: vwmacc_vv_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vwmacc.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -248,7 +248,7 @@ define @vwmacc_vx_nxv4i32( %va, i32 %b, %vc) { ; CHECK-LABEL: vwmacc_vx_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vwmacc.vx v12, a0, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -265,7 +265,7 @@ define @vwmaccu_vv_nxv4i32( %va, %vb, %vc) { ; CHECK-LABEL: vwmaccu_vv_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vwmaccu.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -280,7 +280,7 @@ define @vwmaccu_vx_nxv4i32( %va, i32 %b, %vc) { ; CHECK-LABEL: vwmaccu_vx_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vwmaccu.vx v12, a0, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -297,7 +297,7 @@ define @vwmaccsu_vv_nxv4i32( %va, %vb, %vc) { ; CHECK-LABEL: vwmaccsu_vv_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vwmaccsu.vv v12, v10, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -312,7 +312,7 @@ define @vwmaccsu_vx_nxv4i32( %va, i32 %b, %vc) { ; CHECK-LABEL: vwmaccsu_vx_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vwmaccsu.vx v12, a0, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -329,7 +329,7 @@ define @vwmaccus_vx_nxv4i32( %va, i32 %b, %vc) { ; CHECK-LABEL: vwmaccus_vx_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vwmaccus.vx v12, a0, v8 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -346,7 +346,7 @@ define @vwmacc_vv_nxv8i32( %va, %vb, %vc) { ; CHECK-LABEL: vwmacc_vv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vwmacc.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -361,7 +361,7 @@ define @vwmacc_vx_nxv8i32( %va, i32 %b, %vc) { ; CHECK-LABEL: vwmacc_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vwmacc.vx v16, a0, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -378,7 +378,7 @@ define @vwmaccu_vv_nxv8i32( %va, %vb, %vc) { ; CHECK-LABEL: vwmaccu_vv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vwmaccu.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -393,7 +393,7 @@ define @vwmaccu_vx_nxv8i32( %va, i32 %b, %vc) { ; CHECK-LABEL: vwmaccu_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vwmaccu.vx v16, a0, v8 ; CHECK-NEXT: 
vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -410,7 +410,7 @@ define @vwmaccsu_vv_nxv8i32( %va, %vb, %vc) { ; CHECK-LABEL: vwmaccsu_vv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vwmaccsu.vv v16, v12, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -425,7 +425,7 @@ define @vwmaccsu_vx_nxv8i32( %va, i32 %b, %vc) { ; CHECK-LABEL: vwmaccsu_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vwmaccsu.vx v16, a0, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -442,7 +442,7 @@ define @vwmaccus_vx_nxv8i32( %va, i32 %b, %vc) { ; CHECK-LABEL: vwmaccus_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vwmaccus.vx v16, a0, v8 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll @@ -11,7 +11,7 @@ define @intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vwmaccsu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define @intrinsic_vwmaccsu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vwmaccsu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -105,7 +105,7 @@ define @intrinsic_vwmaccsu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vwmaccsu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -152,7 +152,7 @@ define @intrinsic_vwmaccsu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vwmaccsu.vv v8, v10, v11 ; CHECK-NEXT: ret entry: @@ -199,7 +199,7 @@ define @intrinsic_vwmaccsu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vwmaccsu.vv v8, v12, v14 ; CHECK-NEXT: ret entry: @@ -246,7 +246,7 @@ define @intrinsic_vwmaccsu_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vwmaccsu.vv v8, v16, v20 ; CHECK-NEXT: ret entry: @@ -293,7 +293,7 @@ define @intrinsic_vwmaccsu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, 
e16, mf4, tu, ma ; CHECK-NEXT: vwmaccsu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vwmaccsu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vwmaccsu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vwmaccsu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vwmaccsu.vv v8, v10, v11 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vwmaccsu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vwmaccsu.vv v8, v12, v14 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vwmaccsu_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vwmaccsu.vv v8, v16, v20 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vwmaccsu_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vwmaccsu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vwmaccsu_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vwmaccsu.vv v8, v10, v11 ; CHECK-NEXT: ret entry: @@ -622,7 +622,7 @@ define @intrinsic_vwmaccsu_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vwmaccsu.vv v8, v12, v14 ; CHECK-NEXT: ret entry: @@ -669,7 +669,7 @@ define @intrinsic_vwmaccsu_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vwmaccsu.vv v8, v16, v20 ; CHECK-NEXT: ret entry: @@ -716,7 +716,7 @@ define @intrinsic_vwmaccsu_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vwmaccsu.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -763,7 +763,7 @@ define @intrinsic_vwmaccsu_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, 
a1, e8, mf4, tu, ma ; CHECK-NEXT: vwmaccsu.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -810,7 +810,7 @@ define @intrinsic_vwmaccsu_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma ; CHECK-NEXT: vwmaccsu.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -857,7 +857,7 @@ define @intrinsic_vwmaccsu_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma ; CHECK-NEXT: vwmaccsu.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -904,7 +904,7 @@ define @intrinsic_vwmaccsu_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma ; CHECK-NEXT: vwmaccsu.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -951,7 +951,7 @@ define @intrinsic_vwmaccsu_vx_nxv32i16_i8_nxv32i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv32i16_i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma ; CHECK-NEXT: vwmaccsu.vx v8, a0, v16 ; CHECK-NEXT: ret entry: @@ -998,7 +998,7 @@ define @intrinsic_vwmaccsu_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; CHECK-NEXT: vwmaccsu.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1045,7 +1045,7 @@ define @intrinsic_vwmaccsu_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; CHECK-NEXT: vwmaccsu.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1092,7 +1092,7 @@ define @intrinsic_vwmaccsu_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; CHECK-NEXT: vwmaccsu.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -1139,7 +1139,7 @@ define @intrinsic_vwmaccsu_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma ; CHECK-NEXT: vwmaccsu.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -1186,7 +1186,7 @@ define @intrinsic_vwmaccsu_vx_nxv16i32_i16_nxv16i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv16i32_i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma ; CHECK-NEXT: vwmaccsu.vx v8, a0, v16 ; CHECK-NEXT: ret entry: @@ -1233,7 +1233,7 @@ define @intrinsic_vwmaccsu_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv1i64_i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; CHECK-NEXT: 
vwmaccsu.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1280,7 +1280,7 @@ define @intrinsic_vwmaccsu_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i64_i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; CHECK-NEXT: vwmaccsu.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -1327,7 +1327,7 @@ define @intrinsic_vwmaccsu_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv4i64_i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; CHECK-NEXT: vwmaccsu.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -1374,7 +1374,7 @@ define @intrinsic_vwmaccsu_vx_nxv8i64_i32_nxv8i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv8i64_i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma ; CHECK-NEXT: vwmaccsu.vx v8, a0, v16 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll @@ -11,7 +11,7 @@ define @intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vwmaccsu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define @intrinsic_vwmaccsu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vwmaccsu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -105,7 +105,7 @@ define @intrinsic_vwmaccsu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vwmaccsu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -152,7 +152,7 @@ define @intrinsic_vwmaccsu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vwmaccsu.vv v8, v10, v11 ; CHECK-NEXT: ret entry: @@ -199,7 +199,7 @@ define @intrinsic_vwmaccsu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vwmaccsu.vv v8, v12, v14 ; CHECK-NEXT: ret entry: @@ -246,7 +246,7 @@ define @intrinsic_vwmaccsu_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vwmaccsu.vv v8, v16, v20 ; CHECK-NEXT: ret entry: @@ -293,7 +293,7 @@ define @intrinsic_vwmaccsu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vwmaccsu_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vwmaccsu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vwmaccsu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vwmaccsu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vwmaccsu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vwmaccsu.vv v8, v10, v11 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vwmaccsu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vwmaccsu.vv v8, v12, v14 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vwmaccsu_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vwmaccsu.vv v8, v16, v20 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vwmaccsu_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vwmaccsu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vwmaccsu_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vwmaccsu.vv v8, v10, v11 ; CHECK-NEXT: ret entry: @@ -622,7 +622,7 @@ define @intrinsic_vwmaccsu_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vwmaccsu.vv v8, v12, v14 ; CHECK-NEXT: ret entry: @@ -669,7 +669,7 @@ define @intrinsic_vwmaccsu_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vwmaccsu.vv v8, v16, v20 ; CHECK-NEXT: ret entry: @@ -716,7 +716,7 @@ define @intrinsic_vwmaccsu_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vwmaccsu.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -763,7 +763,7 @@ define @intrinsic_vwmaccsu_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, i64 %3) nounwind { ; 
CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma ; CHECK-NEXT: vwmaccsu.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -810,7 +810,7 @@ define @intrinsic_vwmaccsu_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma ; CHECK-NEXT: vwmaccsu.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -857,7 +857,7 @@ define @intrinsic_vwmaccsu_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma ; CHECK-NEXT: vwmaccsu.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -904,7 +904,7 @@ define @intrinsic_vwmaccsu_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma ; CHECK-NEXT: vwmaccsu.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -951,7 +951,7 @@ define @intrinsic_vwmaccsu_vx_nxv32i16_i8_nxv32i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv32i16_i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma ; CHECK-NEXT: vwmaccsu.vx v8, a0, v16 ; CHECK-NEXT: ret entry: @@ -998,7 +998,7 @@ define @intrinsic_vwmaccsu_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; CHECK-NEXT: vwmaccsu.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1045,7 +1045,7 @@ define @intrinsic_vwmaccsu_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; CHECK-NEXT: vwmaccsu.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1092,7 +1092,7 @@ define @intrinsic_vwmaccsu_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; CHECK-NEXT: vwmaccsu.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -1139,7 +1139,7 @@ define @intrinsic_vwmaccsu_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma ; CHECK-NEXT: vwmaccsu.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -1186,7 +1186,7 @@ define @intrinsic_vwmaccsu_vx_nxv16i32_i16_nxv16i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv16i32_i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma ; CHECK-NEXT: vwmaccsu.vx v8, a0, v16 ; CHECK-NEXT: ret entry: @@ -1233,7 +1233,7 @@ define @intrinsic_vwmaccsu_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: 
intrinsic_vwmaccsu_vx_nxv1i64_i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; CHECK-NEXT: vwmaccsu.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1280,7 +1280,7 @@ define @intrinsic_vwmaccsu_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i64_i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; CHECK-NEXT: vwmaccsu.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -1327,7 +1327,7 @@ define @intrinsic_vwmaccsu_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv4i64_i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; CHECK-NEXT: vwmaccsu.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -1374,7 +1374,7 @@ define @intrinsic_vwmaccsu_vx_nxv8i64_i32_nxv8i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv8i64_i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma ; CHECK-NEXT: vwmaccsu.vx v8, a0, v16 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll @@ -11,7 +11,7 @@ define @intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vwmaccu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define @intrinsic_vwmaccu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vwmaccu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -105,7 +105,7 @@ define @intrinsic_vwmaccu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vwmaccu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -152,7 +152,7 @@ define @intrinsic_vwmaccu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vwmaccu.vv v8, v10, v11 ; CHECK-NEXT: ret entry: @@ -199,7 +199,7 @@ define @intrinsic_vwmaccu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vwmaccu.vv v8, v12, v14 ; CHECK-NEXT: ret entry: @@ -246,7 +246,7 @@ define @intrinsic_vwmaccu_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: 
vwmaccu.vv v8, v16, v20 ; CHECK-NEXT: ret entry: @@ -293,7 +293,7 @@ define @intrinsic_vwmaccu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vwmaccu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vwmaccu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vwmaccu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vwmaccu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vwmaccu.vv v8, v10, v11 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vwmaccu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vwmaccu.vv v8, v12, v14 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vwmaccu_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vwmaccu.vv v8, v16, v20 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vwmaccu_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vwmaccu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vwmaccu_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vwmaccu.vv v8, v10, v11 ; CHECK-NEXT: ret entry: @@ -622,7 +622,7 @@ define @intrinsic_vwmaccu_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vwmaccu.vv v8, v12, v14 ; CHECK-NEXT: ret entry: @@ -669,7 +669,7 @@ define @intrinsic_vwmaccu_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vwmaccu.vv v8, v16, v20 ; CHECK-NEXT: ret entry: @@ -716,7 +716,7 @@ define @intrinsic_vwmaccu_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vwmaccu.vx v8, 
a0, v9 ; CHECK-NEXT: ret entry: @@ -763,7 +763,7 @@ define @intrinsic_vwmaccu_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma ; CHECK-NEXT: vwmaccu.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -810,7 +810,7 @@ define @intrinsic_vwmaccu_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma ; CHECK-NEXT: vwmaccu.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -857,7 +857,7 @@ define @intrinsic_vwmaccu_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma ; CHECK-NEXT: vwmaccu.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -904,7 +904,7 @@ define @intrinsic_vwmaccu_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma ; CHECK-NEXT: vwmaccu.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -951,7 +951,7 @@ define @intrinsic_vwmaccu_vx_nxv32i16_i8_nxv32i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv32i16_i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma ; CHECK-NEXT: vwmaccu.vx v8, a0, v16 ; CHECK-NEXT: ret entry: @@ -998,7 +998,7 @@ define @intrinsic_vwmaccu_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; CHECK-NEXT: vwmaccu.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1045,7 +1045,7 @@ define @intrinsic_vwmaccu_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; CHECK-NEXT: vwmaccu.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1092,7 +1092,7 @@ define @intrinsic_vwmaccu_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; CHECK-NEXT: vwmaccu.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -1139,7 +1139,7 @@ define @intrinsic_vwmaccu_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma ; CHECK-NEXT: vwmaccu.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -1186,7 +1186,7 @@ define @intrinsic_vwmaccu_vx_nxv16i32_i16_nxv16i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv16i32_i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma ; CHECK-NEXT: vwmaccu.vx v8, a0, v16 ; CHECK-NEXT: ret entry: @@ -1233,7 +1233,7 @@ define 
@intrinsic_vwmaccu_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i64_i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; CHECK-NEXT: vwmaccu.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1280,7 +1280,7 @@ define @intrinsic_vwmaccu_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i64_i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; CHECK-NEXT: vwmaccu.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -1327,7 +1327,7 @@ define @intrinsic_vwmaccu_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i64_i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; CHECK-NEXT: vwmaccu.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -1374,7 +1374,7 @@ define @intrinsic_vwmaccu_vx_nxv8i64_i32_nxv8i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i64_i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma ; CHECK-NEXT: vwmaccu.vx v8, a0, v16 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll @@ -11,7 +11,7 @@ define @intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vwmaccu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define @intrinsic_vwmaccu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vwmaccu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -105,7 +105,7 @@ define @intrinsic_vwmaccu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vwmaccu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -152,7 +152,7 @@ define @intrinsic_vwmaccu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vwmaccu.vv v8, v10, v11 ; CHECK-NEXT: ret entry: @@ -199,7 +199,7 @@ define @intrinsic_vwmaccu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vwmaccu.vv v8, v12, v14 ; CHECK-NEXT: ret entry: @@ -246,7 +246,7 @@ define @intrinsic_vwmaccu_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, 
e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vwmaccu.vv v8, v16, v20 ; CHECK-NEXT: ret entry: @@ -293,7 +293,7 @@ define @intrinsic_vwmaccu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vwmaccu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vwmaccu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vwmaccu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vwmaccu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vwmaccu.vv v8, v10, v11 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vwmaccu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vwmaccu.vv v8, v12, v14 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vwmaccu_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vwmaccu.vv v8, v16, v20 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vwmaccu_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vwmaccu.vv v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vwmaccu_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vwmaccu.vv v8, v10, v11 ; CHECK-NEXT: ret entry: @@ -622,7 +622,7 @@ define @intrinsic_vwmaccu_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vwmaccu.vv v8, v12, v14 ; CHECK-NEXT: ret entry: @@ -669,7 +669,7 @@ define @intrinsic_vwmaccu_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vwmaccu.vv v8, v16, v20 ; CHECK-NEXT: ret entry: @@ -716,7 +716,7 @@ define @intrinsic_vwmaccu_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu 
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vwmaccu.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -763,7 +763,7 @@ define @intrinsic_vwmaccu_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma ; CHECK-NEXT: vwmaccu.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -810,7 +810,7 @@ define @intrinsic_vwmaccu_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma ; CHECK-NEXT: vwmaccu.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -857,7 +857,7 @@ define @intrinsic_vwmaccu_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma ; CHECK-NEXT: vwmaccu.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -904,7 +904,7 @@ define @intrinsic_vwmaccu_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma ; CHECK-NEXT: vwmaccu.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -951,7 +951,7 @@ define @intrinsic_vwmaccu_vx_nxv32i16_i8_nxv32i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv32i16_i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma ; CHECK-NEXT: vwmaccu.vx v8, a0, v16 ; CHECK-NEXT: ret entry: @@ -998,7 +998,7 @@ define @intrinsic_vwmaccu_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; CHECK-NEXT: vwmaccu.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1045,7 +1045,7 @@ define @intrinsic_vwmaccu_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; CHECK-NEXT: vwmaccu.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1092,7 +1092,7 @@ define @intrinsic_vwmaccu_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; CHECK-NEXT: vwmaccu.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -1139,7 +1139,7 @@ define @intrinsic_vwmaccu_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma ; CHECK-NEXT: vwmaccu.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -1186,7 +1186,7 @@ define @intrinsic_vwmaccu_vx_nxv16i32_i16_nxv16i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv16i32_i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma ; CHECK-NEXT: vwmaccu.vx 
v8, a0, v16 ; CHECK-NEXT: ret entry: @@ -1233,7 +1233,7 @@ define @intrinsic_vwmaccu_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i64_i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; CHECK-NEXT: vwmaccu.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -1280,7 +1280,7 @@ define @intrinsic_vwmaccu_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i64_i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; CHECK-NEXT: vwmaccu.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -1327,7 +1327,7 @@ define @intrinsic_vwmaccu_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i64_i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; CHECK-NEXT: vwmaccu.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -1374,7 +1374,7 @@ define @intrinsic_vwmaccu_vx_nxv8i64_i32_nxv8i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i64_i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma ; CHECK-NEXT: vwmaccu.vx v8, a0, v16 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll @@ -11,7 +11,7 @@ define @intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vwmaccus.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define @intrinsic_vwmaccus_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma ; CHECK-NEXT: vwmaccus.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -105,7 +105,7 @@ define @intrinsic_vwmaccus_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma ; CHECK-NEXT: vwmaccus.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -152,7 +152,7 @@ define @intrinsic_vwmaccus_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma ; CHECK-NEXT: vwmaccus.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -199,7 +199,7 @@ define @intrinsic_vwmaccus_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma ; CHECK-NEXT: vwmaccus.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -246,7 +246,7 @@ define @intrinsic_vwmaccus_vx_nxv32i16_i8_nxv32i8( %0, i8 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv32i16_i8_nxv32i8: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma ; CHECK-NEXT: vwmaccus.vx v8, a0, v16 ; CHECK-NEXT: ret entry: @@ -293,7 +293,7 @@ define @intrinsic_vwmaccus_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; CHECK-NEXT: vwmaccus.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vwmaccus_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; CHECK-NEXT: vwmaccus.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vwmaccus_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; CHECK-NEXT: vwmaccus.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vwmaccus_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma ; CHECK-NEXT: vwmaccus.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vwmaccus_vx_nxv16i32_i16_nxv16i16( %0, i16 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv16i32_i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma ; CHECK-NEXT: vwmaccus.vx v8, a0, v16 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vwmaccus_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i64_i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; CHECK-NEXT: vwmaccus.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vwmaccus_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i64_i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; CHECK-NEXT: vwmaccus.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -622,7 +622,7 @@ define @intrinsic_vwmaccus_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i64_i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; CHECK-NEXT: vwmaccus.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -669,7 +669,7 @@ define @intrinsic_vwmaccus_vx_nxv8i64_i32_nxv8i32( %0, i32 %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i64_i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma ; CHECK-NEXT: vwmaccus.vx v8, a0, v16 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll @@ -11,7 +11,7 @@ define @intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma ; CHECK-NEXT: vwmaccus.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ define @intrinsic_vwmaccus_vx_nxv2i16_i8_nxv2i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i16_i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma ; CHECK-NEXT: vwmaccus.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -105,7 +105,7 @@ define @intrinsic_vwmaccus_vx_nxv4i16_i8_nxv4i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i16_i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma ; CHECK-NEXT: vwmaccus.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -152,7 +152,7 @@ define @intrinsic_vwmaccus_vx_nxv8i16_i8_nxv8i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i16_i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma ; CHECK-NEXT: vwmaccus.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -199,7 +199,7 @@ define @intrinsic_vwmaccus_vx_nxv16i16_i8_nxv16i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv16i16_i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma ; CHECK-NEXT: vwmaccus.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -246,7 +246,7 @@ define @intrinsic_vwmaccus_vx_nxv32i16_i8_nxv32i8( %0, i8 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv32i16_i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma ; CHECK-NEXT: vwmaccus.vx v8, a0, v16 ; CHECK-NEXT: ret entry: @@ -293,7 +293,7 @@ define @intrinsic_vwmaccus_vx_nxv1i32_i16_nxv1i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i32_i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma ; CHECK-NEXT: vwmaccus.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vwmaccus_vx_nxv2i32_i16_nxv2i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i32_i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma ; CHECK-NEXT: vwmaccus.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vwmaccus_vx_nxv4i32_i16_nxv4i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i32_i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma ; CHECK-NEXT: vwmaccus.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vwmaccus_vx_nxv8i32_i16_nxv8i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i32_i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma ; CHECK-NEXT: vwmaccus.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ 
define @intrinsic_vwmaccus_vx_nxv16i32_i16_nxv16i16( %0, i16 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv16i32_i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma ; CHECK-NEXT: vwmaccus.vx v8, a0, v16 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vwmaccus_vx_nxv1i64_i32_nxv1i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i64_i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma ; CHECK-NEXT: vwmaccus.vx v8, a0, v9 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vwmaccus_vx_nxv2i64_i32_nxv2i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i64_i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma ; CHECK-NEXT: vwmaccus.vx v8, a0, v10 ; CHECK-NEXT: ret entry: @@ -622,7 +622,7 @@ define @intrinsic_vwmaccus_vx_nxv4i64_i32_nxv4i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i64_i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma ; CHECK-NEXT: vwmaccus.vx v8, a0, v12 ; CHECK-NEXT: ret entry: @@ -669,7 +669,7 @@ define @intrinsic_vwmaccus_vx_nxv8i64_i32_nxv8i32( %0, i32 %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i64_i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma ; CHECK-NEXT: vwmaccus.vx v8, a0, v16 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vwmul.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -58,7 +58,7 @@ define @intrinsic_vwmul_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vwmul.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -106,7 +106,7 @@ define @intrinsic_vwmul_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vwmul.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -154,7 +154,7 @@ define @intrinsic_vwmul_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vwmul.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -202,7 +202,7 @@ define @intrinsic_vwmul_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vwmul.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -250,7 +250,7 @@ define @intrinsic_vwmul_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwmul.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -298,7 +298,7 @@ define @intrinsic_vwmul_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vwmul.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -346,7 +346,7 @@ define @intrinsic_vwmul_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vwmul.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -394,7 +394,7 @@ define @intrinsic_vwmul_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vwmul.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -442,7 +442,7 @@ define @intrinsic_vwmul_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vwmul.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -490,7 +490,7 @@ define @intrinsic_vwmul_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwmul.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -538,7 +538,7 @@ define @intrinsic_vwmul_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vwmul.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -586,7 +586,7 @@ define @intrinsic_vwmul_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vwmul.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -634,7 +634,7 @@ define @intrinsic_vwmul_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vwmul.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -682,7 +682,7 @@ define 
@intrinsic_vwmul_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vwmul.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -730,7 +730,7 @@ define @intrinsic_vwmul_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vwmul.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -778,7 +778,7 @@ define @intrinsic_vwmul_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vwmul.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -826,7 +826,7 @@ define @intrinsic_vwmul_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vwmul.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -874,7 +874,7 @@ define @intrinsic_vwmul_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vwmul.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -922,7 +922,7 @@ define @intrinsic_vwmul_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vwmul.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -970,7 +970,7 @@ define @intrinsic_vwmul_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vwmul.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1018,7 +1018,7 @@ define @intrinsic_vwmul_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vwmul.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1066,7 +1066,7 @@ define @intrinsic_vwmul_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vwmul.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1114,7 +1114,7 @@ define @intrinsic_vwmul_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vwmul.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; 
CHECK-NEXT: ret @@ -1162,7 +1162,7 @@ define @intrinsic_vwmul_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vwmul.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1210,7 +1210,7 @@ define @intrinsic_vwmul_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vwmul.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1258,7 +1258,7 @@ define @intrinsic_vwmul_vx_nxv1i64_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vwmul.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1306,7 +1306,7 @@ define @intrinsic_vwmul_vx_nxv2i64_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vwmul.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1354,7 +1354,7 @@ define @intrinsic_vwmul_vx_nxv4i64_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vwmul.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1402,7 +1402,7 @@ define @intrinsic_vwmul_vx_nxv8i64_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vwmul.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmul-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vwmul.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -58,7 +58,7 @@ define @intrinsic_vwmul_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vwmul.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -106,7 +106,7 @@ define @intrinsic_vwmul_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vwmul.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -154,7 +154,7 @@ define @intrinsic_vwmul_vv_nxv8i16_nxv8i8_nxv8i8( %0, 
%1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vwmul.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -202,7 +202,7 @@ define @intrinsic_vwmul_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vwmul.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -250,7 +250,7 @@ define @intrinsic_vwmul_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwmul.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -298,7 +298,7 @@ define @intrinsic_vwmul_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vwmul.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -346,7 +346,7 @@ define @intrinsic_vwmul_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vwmul.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -394,7 +394,7 @@ define @intrinsic_vwmul_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vwmul.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -442,7 +442,7 @@ define @intrinsic_vwmul_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vwmul.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -490,7 +490,7 @@ define @intrinsic_vwmul_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwmul.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -538,7 +538,7 @@ define @intrinsic_vwmul_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vwmul.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -586,7 +586,7 @@ define @intrinsic_vwmul_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vwmul.vv v10, v8, v9 ; 
CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -634,7 +634,7 @@ define @intrinsic_vwmul_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vwmul.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -682,7 +682,7 @@ define @intrinsic_vwmul_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vwmul.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -730,7 +730,7 @@ define @intrinsic_vwmul_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vwmul.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -778,7 +778,7 @@ define @intrinsic_vwmul_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vwmul.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -826,7 +826,7 @@ define @intrinsic_vwmul_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vwmul.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -874,7 +874,7 @@ define @intrinsic_vwmul_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vwmul.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -922,7 +922,7 @@ define @intrinsic_vwmul_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vwmul.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -970,7 +970,7 @@ define @intrinsic_vwmul_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vwmul.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1018,7 +1018,7 @@ define @intrinsic_vwmul_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vwmul.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1066,7 +1066,7 @@ define @intrinsic_vwmul_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, 
mf2, ta, ma ; CHECK-NEXT: vwmul.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1114,7 +1114,7 @@ define @intrinsic_vwmul_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vwmul.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1162,7 +1162,7 @@ define @intrinsic_vwmul_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vwmul.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1210,7 +1210,7 @@ define @intrinsic_vwmul_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vwmul.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1258,7 +1258,7 @@ define @intrinsic_vwmul_vx_nxv1i64_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vwmul.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1306,7 +1306,7 @@ define @intrinsic_vwmul_vx_nxv2i64_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vwmul.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1354,7 +1354,7 @@ define @intrinsic_vwmul_vx_nxv4i64_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vwmul.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1402,7 +1402,7 @@ define @intrinsic_vwmul_vx_nxv8i64_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmul_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vwmul.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmul-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vwmul-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmul-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmul-sdnode.ll @@ -5,7 +5,7 @@ define @vwmul_vv_nxv1i64( %va, %vb) { ; CHECK-LABEL: vwmul_vv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vwmul.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -18,7 +18,7 @@ define @vwmulu_vv_nxv1i64( %va, %vb) { ; CHECK-LABEL: vwmulu_vv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vwmulu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -31,7 +31,7 @@ define @vwmulsu_vv_nxv1i64( %va, %vb) { ; CHECK-LABEL: vwmulsu_vv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: 
vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vwmulsu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -44,7 +44,7 @@ define @vwmul_vx_nxv1i64( %va, i32 %b) { ; CHECK-LABEL: vwmul_vx_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vwmul.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -59,7 +59,7 @@ define @vwmulu_vx_nxv1i64( %va, i32 %b) { ; CHECK-LABEL: vwmulu_vx_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vwmulu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -74,7 +74,7 @@ define @vwmulsu_vx_nxv1i64( %va, i32 %b) { ; CHECK-LABEL: vwmulsu_vx_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vwmulsu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -89,7 +89,7 @@ define @vwmul_vv_nxv2i64( %va, %vb) { ; CHECK-LABEL: vwmul_vv_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vwmul.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -102,7 +102,7 @@ define @vwmulu_vv_nxv2i64( %va, %vb) { ; CHECK-LABEL: vwmulu_vv_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vwmulu.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -115,7 +115,7 @@ define @vwmulsu_vv_nxv2i64( %va, %vb) { ; CHECK-LABEL: vwmulsu_vv_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vwmulsu.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -128,7 +128,7 @@ define @vwmul_vx_nxv2i64( %va, i32 %b) { ; CHECK-LABEL: vwmul_vx_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vwmul.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -143,7 +143,7 @@ define @vwmulu_vx_nxv2i64( %va, i32 %b) { ; CHECK-LABEL: vwmulu_vx_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vwmulu.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -158,7 +158,7 @@ define @vwmulsu_vx_nxv2i64( %va, i32 %b) { ; CHECK-LABEL: vwmulsu_vx_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vwmulsu.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -173,7 +173,7 @@ define @vwmul_vv_nxv4i64( %va, %vb) { ; CHECK-LABEL: vwmul_vv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vwmul.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -186,7 +186,7 @@ define @vwmulu_vv_nxv4i64( %va, %vb) { ; CHECK-LABEL: vwmulu_vv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vwmulu.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -199,7 +199,7 @@ define @vwmulsu_vv_nxv4i64( %va, %vb) { ; CHECK-LABEL: vwmulsu_vv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, 
m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vwmulsu.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -212,7 +212,7 @@ define @vwmul_vx_nxv4i64( %va, i32 %b) { ; CHECK-LABEL: vwmul_vx_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vwmul.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -227,7 +227,7 @@ define @vwmulu_vx_nxv4i64( %va, i32 %b) { ; CHECK-LABEL: vwmulu_vx_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vwmulu.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -242,7 +242,7 @@ define @vwmulsu_vx_nxv4i64( %va, i32 %b) { ; CHECK-LABEL: vwmulsu_vx_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vwmulsu.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -257,7 +257,7 @@ define @vwmul_vv_nxv8i64( %va, %vb) { ; CHECK-LABEL: vwmul_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vwmul.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -270,7 +270,7 @@ define @vwmulu_vv_nxv8i64( %va, %vb) { ; CHECK-LABEL: vwmulu_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vwmulu.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -283,7 +283,7 @@ define @vwmulsu_vv_nxv8i64( %va, %vb) { ; CHECK-LABEL: vwmulsu_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vwmulsu.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -296,7 +296,7 @@ define @vwmul_vx_nxv8i64( %va, i32 %b) { ; CHECK-LABEL: vwmul_vx_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vwmul.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -311,7 +311,7 @@ define @vwmulu_vx_nxv8i64( %va, i32 %b) { ; CHECK-LABEL: vwmulu_vx_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vwmulu.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -326,7 +326,7 @@ define @vwmulsu_vx_nxv8i64( %va, i32 %b) { ; CHECK-LABEL: vwmulsu_vx_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vwmulsu.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vwmulsu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -58,7 +58,7 @@ define @intrinsic_vwmulsu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vwmulsu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -106,7 +106,7 @@ define @intrinsic_vwmulsu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vwmulsu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -154,7 +154,7 @@ define @intrinsic_vwmulsu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vwmulsu.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -202,7 +202,7 @@ define @intrinsic_vwmulsu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vwmulsu.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -250,7 +250,7 @@ define @intrinsic_vwmulsu_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwmulsu.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -298,7 +298,7 @@ define @intrinsic_vwmulsu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vwmulsu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -346,7 +346,7 @@ define @intrinsic_vwmulsu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vwmulsu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -394,7 +394,7 @@ define @intrinsic_vwmulsu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vwmulsu.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -442,7 +442,7 @@ define @intrinsic_vwmulsu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vwmulsu.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -490,7 +490,7 @@ define @intrinsic_vwmulsu_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwmulsu.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -538,7 
+538,7 @@ define @intrinsic_vwmulsu_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vwmulsu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -586,7 +586,7 @@ define @intrinsic_vwmulsu_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vwmulsu.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -634,7 +634,7 @@ define @intrinsic_vwmulsu_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vwmulsu.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -682,7 +682,7 @@ define @intrinsic_vwmulsu_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vwmulsu.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -730,7 +730,7 @@ define @intrinsic_vwmulsu_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vwmulsu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -778,7 +778,7 @@ define @intrinsic_vwmulsu_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vwmulsu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -826,7 +826,7 @@ define @intrinsic_vwmulsu_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vwmulsu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -874,7 +874,7 @@ define @intrinsic_vwmulsu_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vwmulsu.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -922,7 +922,7 @@ define @intrinsic_vwmulsu_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vwmulsu.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -970,7 +970,7 @@ define @intrinsic_vwmulsu_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, 
a1, e8, m4, ta, ma ; CHECK-NEXT: vwmulsu.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1018,7 +1018,7 @@ define @intrinsic_vwmulsu_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vwmulsu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1066,7 +1066,7 @@ define @intrinsic_vwmulsu_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vwmulsu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1114,7 +1114,7 @@ define @intrinsic_vwmulsu_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vwmulsu.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1162,7 +1162,7 @@ define @intrinsic_vwmulsu_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vwmulsu.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1210,7 +1210,7 @@ define @intrinsic_vwmulsu_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vwmulsu.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1258,7 +1258,7 @@ define @intrinsic_vwmulsu_vx_nxv1i64_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vwmulsu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1306,7 +1306,7 @@ define @intrinsic_vwmulsu_vx_nxv2i64_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vwmulsu.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1354,7 +1354,7 @@ define @intrinsic_vwmulsu_vx_nxv4i64_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vwmulsu.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1402,7 +1402,7 @@ define @intrinsic_vwmulsu_vx_nxv8i64_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vwmulsu.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmulsu-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vwmulsu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -58,7 +58,7 @@ define @intrinsic_vwmulsu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vwmulsu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -106,7 +106,7 @@ define @intrinsic_vwmulsu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vwmulsu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -154,7 +154,7 @@ define @intrinsic_vwmulsu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vwmulsu.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -202,7 +202,7 @@ define @intrinsic_vwmulsu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vwmulsu.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -250,7 +250,7 @@ define @intrinsic_vwmulsu_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwmulsu.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -298,7 +298,7 @@ define @intrinsic_vwmulsu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vwmulsu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -346,7 +346,7 @@ define @intrinsic_vwmulsu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vwmulsu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -394,7 +394,7 @@ define @intrinsic_vwmulsu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vwmulsu.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -442,7 +442,7 @@ define @intrinsic_vwmulsu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vwmulsu_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vwmulsu.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -490,7 +490,7 @@ define @intrinsic_vwmulsu_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwmulsu.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -538,7 +538,7 @@ define @intrinsic_vwmulsu_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vwmulsu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -586,7 +586,7 @@ define @intrinsic_vwmulsu_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vwmulsu.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -634,7 +634,7 @@ define @intrinsic_vwmulsu_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vwmulsu.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -682,7 +682,7 @@ define @intrinsic_vwmulsu_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vwmulsu.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -730,7 +730,7 @@ define @intrinsic_vwmulsu_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vwmulsu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -778,7 +778,7 @@ define @intrinsic_vwmulsu_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vwmulsu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -826,7 +826,7 @@ define @intrinsic_vwmulsu_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vwmulsu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -874,7 +874,7 @@ define @intrinsic_vwmulsu_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vwmulsu.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 
; CHECK-NEXT: ret @@ -922,7 +922,7 @@ define @intrinsic_vwmulsu_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vwmulsu.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -970,7 +970,7 @@ define @intrinsic_vwmulsu_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vwmulsu.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1018,7 +1018,7 @@ define @intrinsic_vwmulsu_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vwmulsu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1066,7 +1066,7 @@ define @intrinsic_vwmulsu_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vwmulsu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1114,7 +1114,7 @@ define @intrinsic_vwmulsu_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vwmulsu.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1162,7 +1162,7 @@ define @intrinsic_vwmulsu_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vwmulsu.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1210,7 +1210,7 @@ define @intrinsic_vwmulsu_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vwmulsu.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1258,7 +1258,7 @@ define @intrinsic_vwmulsu_vx_nxv1i64_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vwmulsu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1306,7 +1306,7 @@ define @intrinsic_vwmulsu_vx_nxv2i64_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vwmulsu.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1354,7 +1354,7 @@ define @intrinsic_vwmulsu_vx_nxv4i64_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli 
zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vwmulsu.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1402,7 +1402,7 @@ define @intrinsic_vwmulsu_vx_nxv8i64_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulsu_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vwmulsu.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vwmulu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -58,7 +58,7 @@ define @intrinsic_vwmulu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vwmulu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -106,7 +106,7 @@ define @intrinsic_vwmulu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vwmulu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -154,7 +154,7 @@ define @intrinsic_vwmulu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vwmulu.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -202,7 +202,7 @@ define @intrinsic_vwmulu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vwmulu.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -250,7 +250,7 @@ define @intrinsic_vwmulu_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwmulu.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -298,7 +298,7 @@ define @intrinsic_vwmulu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vwmulu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -346,7 +346,7 @@ define @intrinsic_vwmulu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli 
zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vwmulu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -394,7 +394,7 @@ define @intrinsic_vwmulu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vwmulu.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -442,7 +442,7 @@ define @intrinsic_vwmulu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vwmulu.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -490,7 +490,7 @@ define @intrinsic_vwmulu_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwmulu.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -538,7 +538,7 @@ define @intrinsic_vwmulu_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vwmulu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -586,7 +586,7 @@ define @intrinsic_vwmulu_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vwmulu.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -634,7 +634,7 @@ define @intrinsic_vwmulu_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vwmulu.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -682,7 +682,7 @@ define @intrinsic_vwmulu_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vwmulu.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -730,7 +730,7 @@ define @intrinsic_vwmulu_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vwmulu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -778,7 +778,7 @@ define @intrinsic_vwmulu_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vwmulu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -826,7 +826,7 @@ define @intrinsic_vwmulu_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vwmulu_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vwmulu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -874,7 +874,7 @@ define @intrinsic_vwmulu_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vwmulu.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -922,7 +922,7 @@ define @intrinsic_vwmulu_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vwmulu.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -970,7 +970,7 @@ define @intrinsic_vwmulu_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vwmulu.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1018,7 +1018,7 @@ define @intrinsic_vwmulu_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vwmulu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1066,7 +1066,7 @@ define @intrinsic_vwmulu_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vwmulu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1114,7 +1114,7 @@ define @intrinsic_vwmulu_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vwmulu.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1162,7 +1162,7 @@ define @intrinsic_vwmulu_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vwmulu.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1210,7 +1210,7 @@ define @intrinsic_vwmulu_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vwmulu.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1258,7 +1258,7 @@ define @intrinsic_vwmulu_vx_nxv1i64_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vwmulu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1306,7 +1306,7 
@@ define @intrinsic_vwmulu_vx_nxv2i64_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vwmulu.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1354,7 +1354,7 @@ define @intrinsic_vwmulu_vx_nxv4i64_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vwmulu.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1402,7 +1402,7 @@ define @intrinsic_vwmulu_vx_nxv8i64_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vwmulu.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwmulu-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vwmulu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -58,7 +58,7 @@ define @intrinsic_vwmulu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vwmulu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -106,7 +106,7 @@ define @intrinsic_vwmulu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vwmulu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -154,7 +154,7 @@ define @intrinsic_vwmulu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vwmulu.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -202,7 +202,7 @@ define @intrinsic_vwmulu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vwmulu.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -250,7 +250,7 @@ define @intrinsic_vwmulu_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwmulu.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -298,7 +298,7 @@ define @intrinsic_vwmulu_vv_nxv1i32_nxv1i16_nxv1i16( %0, 
%1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vwmulu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -346,7 +346,7 @@ define @intrinsic_vwmulu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vwmulu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -394,7 +394,7 @@ define @intrinsic_vwmulu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vwmulu.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -442,7 +442,7 @@ define @intrinsic_vwmulu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vwmulu.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -490,7 +490,7 @@ define @intrinsic_vwmulu_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwmulu.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -538,7 +538,7 @@ define @intrinsic_vwmulu_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vwmulu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -586,7 +586,7 @@ define @intrinsic_vwmulu_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vwmulu.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -634,7 +634,7 @@ define @intrinsic_vwmulu_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vwmulu.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -682,7 +682,7 @@ define @intrinsic_vwmulu_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vwmulu.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -730,7 +730,7 @@ define @intrinsic_vwmulu_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: 
vwmulu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -778,7 +778,7 @@ define @intrinsic_vwmulu_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vwmulu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -826,7 +826,7 @@ define @intrinsic_vwmulu_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vwmulu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -874,7 +874,7 @@ define @intrinsic_vwmulu_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vwmulu.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -922,7 +922,7 @@ define @intrinsic_vwmulu_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vwmulu.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -970,7 +970,7 @@ define @intrinsic_vwmulu_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vwmulu.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1018,7 +1018,7 @@ define @intrinsic_vwmulu_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vwmulu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1066,7 +1066,7 @@ define @intrinsic_vwmulu_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vwmulu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1114,7 +1114,7 @@ define @intrinsic_vwmulu_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vwmulu.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1162,7 +1162,7 @@ define @intrinsic_vwmulu_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vwmulu.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1210,7 +1210,7 @@ define @intrinsic_vwmulu_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vwmulu.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1258,7 +1258,7 @@ define @intrinsic_vwmulu_vx_nxv1i64_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vwmulu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1306,7 +1306,7 @@ define @intrinsic_vwmulu_vx_nxv2i64_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vwmulu.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1354,7 +1354,7 @@ define @intrinsic_vwmulu_vx_nxv4i64_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vwmulu.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1402,7 +1402,7 @@ define @intrinsic_vwmulu_vx_nxv8i64_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwmulu_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vwmulu.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -33,7 +33,7 @@ define @intrinsic_vwredsum_mask_vs_nxv4i16_nxv1i8_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv1i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -56,7 +56,7 @@ define @intrinsic_vwredsum_vs_nxv4i16_nxv2i8_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv2i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -79,7 +79,7 @@ define @intrinsic_vwredsum_mask_vs_nxv4i16_nxv2i8_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv2i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -102,7 +102,7 @@ define @intrinsic_vwredsum_vs_nxv4i16_nxv4i8_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, 
mf2, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -125,7 +125,7 @@ define @intrinsic_vwredsum_mask_vs_nxv4i16_nxv4i8_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -148,7 +148,7 @@ define @intrinsic_vwredsum_vs_nxv4i16_nxv8i8_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv8i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -171,7 +171,7 @@ define @intrinsic_vwredsum_mask_vs_nxv4i16_nxv8i8_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv8i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -194,7 +194,7 @@ define @intrinsic_vwredsum_vs_nxv4i16_nxv16i8_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv16i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vwredsum_mask_vs_nxv4i16_nxv16i8_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv16i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -240,7 +240,7 @@ define @intrinsic_vwredsum_vs_nxv4i16_nxv32i8_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv32i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -263,7 +263,7 @@ define @intrinsic_vwredsum_mask_vs_nxv4i16_nxv32i8_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv32i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -286,7 +286,7 @@ define @intrinsic_vwredsum_vs_nxv4i16_nxv64i8_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv64i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -309,7 +309,7 @@ define @intrinsic_vwredsum_mask_vs_nxv4i16_nxv64i8_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv64i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -332,7 +332,7 @@ define @intrinsic_vwredsum_vs_nxv2i32_nxv1i16_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv1i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -355,7 +355,7 @@ define @intrinsic_vwredsum_mask_vs_nxv2i32_nxv1i16_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv1i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -378,7 +378,7 @@ define @intrinsic_vwredsum_vs_nxv2i32_nxv2i16_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -401,7 +401,7 @@ define @intrinsic_vwredsum_mask_vs_nxv2i32_nxv2i16_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -424,7 +424,7 @@ define @intrinsic_vwredsum_vs_nxv2i32_nxv4i16_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv4i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -447,7 +447,7 @@ define @intrinsic_vwredsum_mask_vs_nxv2i32_nxv4i16_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv4i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -470,7 +470,7 @@ define @intrinsic_vwredsum_vs_nxv2i32_nxv8i16_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv8i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -493,7 +493,7 @@ define @intrinsic_vwredsum_mask_vs_nxv2i32_nxv8i16_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv8i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +516,7 @@ define @intrinsic_vwredsum_vs_nxv2i32_nxv16i16_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv16i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -539,7 +539,7 @@ define @intrinsic_vwredsum_mask_vs_nxv2i32_nxv16i16_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv16i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -562,7 +562,7 @@ define @intrinsic_vwredsum_vs_nxv2i32_nxv32i16_nxv2i32( %0, 
%1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv32i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -585,7 +585,7 @@ define @intrinsic_vwredsum_mask_vs_nxv2i32_nxv32i16_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv32i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -608,7 +608,7 @@ define @intrinsic_vwredsum_vs_nxv1i64_nxv1i32_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -631,7 +631,7 @@ define @intrinsic_vwredsum_mask_vs_nxv1i64_nxv1i32_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -654,7 +654,7 @@ define @intrinsic_vwredsum_vs_nxv1i64_nxv2i32_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv2i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -677,7 +677,7 @@ define @intrinsic_vwredsum_mask_vs_nxv1i64_nxv2i32_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv2i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -700,7 +700,7 @@ define @intrinsic_vwredsum_vs_nxv1i64_nxv4i32_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv4i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -723,7 +723,7 @@ define @intrinsic_vwredsum_mask_vs_nxv1i64_nxv4i32_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv4i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +746,7 @@ define @intrinsic_vwredsum_vs_nxv1i64_nxv8i32_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv8i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -769,7 +769,7 @@ define @intrinsic_vwredsum_mask_vs_nxv1i64_nxv8i32_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv8i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v12, 
v9, v0.t ; CHECK-NEXT: ret entry: @@ -792,7 +792,7 @@ define @intrinsic_vwredsum_vs_nxv1i64_nxv16i32_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv16i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -815,7 +815,7 @@ define @intrinsic_vwredsum_mask_vs_nxv1i64_nxv16i32_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv16i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwredsum-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -33,7 +33,7 @@ define @intrinsic_vwredsum_mask_vs_nxv4i16_nxv1i8_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv1i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -56,7 +56,7 @@ define @intrinsic_vwredsum_vs_nxv4i16_nxv2i8_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv2i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -79,7 +79,7 @@ define @intrinsic_vwredsum_mask_vs_nxv4i16_nxv2i8_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv2i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -102,7 +102,7 @@ define @intrinsic_vwredsum_vs_nxv4i16_nxv4i8_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -125,7 +125,7 @@ define @intrinsic_vwredsum_mask_vs_nxv4i16_nxv4i8_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -148,7 +148,7 @@ define @intrinsic_vwredsum_vs_nxv4i16_nxv8i8_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv8i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -171,7 +171,7 @@ define 
@intrinsic_vwredsum_mask_vs_nxv4i16_nxv8i8_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv8i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -194,7 +194,7 @@ define @intrinsic_vwredsum_vs_nxv4i16_nxv16i8_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv16i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vwredsum_mask_vs_nxv4i16_nxv16i8_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv16i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -240,7 +240,7 @@ define @intrinsic_vwredsum_vs_nxv4i16_nxv32i8_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv32i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -263,7 +263,7 @@ define @intrinsic_vwredsum_mask_vs_nxv4i16_nxv32i8_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv32i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -286,7 +286,7 @@ define @intrinsic_vwredsum_vs_nxv4i16_nxv64i8_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv64i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -309,7 +309,7 @@ define @intrinsic_vwredsum_mask_vs_nxv4i16_nxv64i8_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv64i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -332,7 +332,7 @@ define @intrinsic_vwredsum_vs_nxv2i32_nxv1i16_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv1i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -355,7 +355,7 @@ define @intrinsic_vwredsum_mask_vs_nxv2i32_nxv1i16_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv1i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -378,7 +378,7 @@ define @intrinsic_vwredsum_vs_nxv2i32_nxv2i16_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, 
tu, ma ; CHECK-NEXT: vwredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -401,7 +401,7 @@ define @intrinsic_vwredsum_mask_vs_nxv2i32_nxv2i16_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -424,7 +424,7 @@ define @intrinsic_vwredsum_vs_nxv2i32_nxv4i16_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv4i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -447,7 +447,7 @@ define @intrinsic_vwredsum_mask_vs_nxv2i32_nxv4i16_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv4i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -470,7 +470,7 @@ define @intrinsic_vwredsum_vs_nxv2i32_nxv8i16_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv8i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -493,7 +493,7 @@ define @intrinsic_vwredsum_mask_vs_nxv2i32_nxv8i16_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv8i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +516,7 @@ define @intrinsic_vwredsum_vs_nxv2i32_nxv16i16_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv16i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -539,7 +539,7 @@ define @intrinsic_vwredsum_mask_vs_nxv2i32_nxv16i16_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv16i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -562,7 +562,7 @@ define @intrinsic_vwredsum_vs_nxv2i32_nxv32i16_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv32i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -585,7 +585,7 @@ define @intrinsic_vwredsum_mask_vs_nxv2i32_nxv32i16_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv32i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -608,7 +608,7 @@ define @intrinsic_vwredsum_vs_nxv1i64_nxv1i32_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv1i32_nxv1i64: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -631,7 +631,7 @@ define @intrinsic_vwredsum_mask_vs_nxv1i64_nxv1i32_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -654,7 +654,7 @@ define @intrinsic_vwredsum_vs_nxv1i64_nxv2i32_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv2i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -677,7 +677,7 @@ define @intrinsic_vwredsum_mask_vs_nxv1i64_nxv2i32_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv2i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -700,7 +700,7 @@ define @intrinsic_vwredsum_vs_nxv1i64_nxv4i32_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv4i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -723,7 +723,7 @@ define @intrinsic_vwredsum_mask_vs_nxv1i64_nxv4i32_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv4i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +746,7 @@ define @intrinsic_vwredsum_vs_nxv1i64_nxv8i32_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv8i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -769,7 +769,7 @@ define @intrinsic_vwredsum_mask_vs_nxv1i64_nxv8i32_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv8i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -792,7 +792,7 @@ define @intrinsic_vwredsum_vs_nxv1i64_nxv16i32_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv16i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -815,7 +815,7 @@ define @intrinsic_vwredsum_mask_vs_nxv1i64_nxv16i32_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv16i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vwredsum.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -33,7 +33,7 @@ define @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv1i8_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv1i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -56,7 +56,7 @@ define @intrinsic_vwredsumu_vs_nxv4i16_nxv2i8_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv2i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -79,7 +79,7 @@ define @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv2i8_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv2i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -102,7 +102,7 @@ define @intrinsic_vwredsumu_vs_nxv4i16_nxv4i8_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -125,7 +125,7 @@ define @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv4i8_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -148,7 +148,7 @@ define @intrinsic_vwredsumu_vs_nxv4i16_nxv8i8_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv8i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -171,7 +171,7 @@ define @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv8i8_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv8i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -194,7 +194,7 @@ define @intrinsic_vwredsumu_vs_nxv4i16_nxv16i8_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv16i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define 
@intrinsic_vwredsumu_mask_vs_nxv4i16_nxv16i8_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv16i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -240,7 +240,7 @@ define @intrinsic_vwredsumu_vs_nxv4i16_nxv32i8_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv32i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -263,7 +263,7 @@ define @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv32i8_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv32i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -286,7 +286,7 @@ define @intrinsic_vwredsumu_vs_nxv4i16_nxv64i8_nxv4i16( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv64i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -309,7 +309,7 @@ define @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv64i8_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv64i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -332,7 +332,7 @@ define @intrinsic_vwredsumu_vs_nxv2i32_nxv1i16_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv1i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -355,7 +355,7 @@ define @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv1i16_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv1i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -378,7 +378,7 @@ define @intrinsic_vwredsumu_vs_nxv2i32_nxv2i16_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -401,7 +401,7 @@ define @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv2i16_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -424,7 +424,7 @@ define @intrinsic_vwredsumu_vs_nxv2i32_nxv4i16_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv4i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; 
CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -447,7 +447,7 @@ define @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv4i16_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv4i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -470,7 +470,7 @@ define @intrinsic_vwredsumu_vs_nxv2i32_nxv8i16_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv8i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -493,7 +493,7 @@ define @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv8i16_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv8i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +516,7 @@ define @intrinsic_vwredsumu_vs_nxv2i32_nxv16i16_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv16i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -539,7 +539,7 @@ define @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv16i16_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv16i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -562,7 +562,7 @@ define @intrinsic_vwredsumu_vs_nxv2i32_nxv32i16_nxv2i32( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv32i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -585,7 +585,7 @@ define @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv32i16_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv32i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -608,7 +608,7 @@ define @intrinsic_vwredsumu_vs_nxv1i64_nxv1i32_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -631,7 +631,7 @@ define @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv1i32_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -654,7 +654,7 @@ define @intrinsic_vwredsumu_vs_nxv1i64_nxv2i32_nxv1i64( %0, %1, %2, i32 %3) 
nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv2i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -677,7 +677,7 @@ define @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv2i32_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv2i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -700,7 +700,7 @@ define @intrinsic_vwredsumu_vs_nxv1i64_nxv4i32_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv4i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -723,7 +723,7 @@ define @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv4i32_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv4i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +746,7 @@ define @intrinsic_vwredsumu_vs_nxv1i64_nxv8i32_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv8i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -769,7 +769,7 @@ define @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv8i32_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv8i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -792,7 +792,7 @@ define @intrinsic_vwredsumu_vs_nxv1i64_nxv16i32_nxv1i64( %0, %1, %2, i32 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv16i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -815,7 +815,7 @@ define @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv16i32_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv16i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwredsumu-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -33,7 +33,7 @@ define @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv1i8_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vwredsumu_mask_vs_nxv4i16_nxv1i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -56,7 +56,7 @@ define @intrinsic_vwredsumu_vs_nxv4i16_nxv2i8_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv2i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -79,7 +79,7 @@ define @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv2i8_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv2i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -102,7 +102,7 @@ define @intrinsic_vwredsumu_vs_nxv4i16_nxv4i8_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -125,7 +125,7 @@ define @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv4i8_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv4i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -148,7 +148,7 @@ define @intrinsic_vwredsumu_vs_nxv4i16_nxv8i8_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv8i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -171,7 +171,7 @@ define @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv8i8_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv8i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -194,7 +194,7 @@ define @intrinsic_vwredsumu_vs_nxv4i16_nxv16i8_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv16i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -217,7 +217,7 @@ define @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv16i8_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv16i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -240,7 +240,7 @@ define @intrinsic_vwredsumu_vs_nxv4i16_nxv32i8_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv32i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -263,7 +263,7 @@ 
define @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv32i8_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv32i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -286,7 +286,7 @@ define @intrinsic_vwredsumu_vs_nxv4i16_nxv64i8_nxv4i16( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv64i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -309,7 +309,7 @@ define @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv64i8_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv64i8_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -332,7 +332,7 @@ define @intrinsic_vwredsumu_vs_nxv2i32_nxv1i16_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv1i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -355,7 +355,7 @@ define @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv1i16_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv1i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -378,7 +378,7 @@ define @intrinsic_vwredsumu_vs_nxv2i32_nxv2i16_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -401,7 +401,7 @@ define @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv2i16_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv2i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -424,7 +424,7 @@ define @intrinsic_vwredsumu_vs_nxv2i32_nxv4i16_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv4i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -447,7 +447,7 @@ define @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv4i16_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv4i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -470,7 +470,7 @@ define @intrinsic_vwredsumu_vs_nxv2i32_nxv8i16_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv8i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, 
tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -493,7 +493,7 @@ define @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv8i16_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv8i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -516,7 +516,7 @@ define @intrinsic_vwredsumu_vs_nxv2i32_nxv16i16_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv16i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -539,7 +539,7 @@ define @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv16i16_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv16i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -562,7 +562,7 @@ define @intrinsic_vwredsumu_vs_nxv2i32_nxv32i16_nxv2i32( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv32i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -585,7 +585,7 @@ define @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv32i16_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv32i16_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: @@ -608,7 +608,7 @@ define @intrinsic_vwredsumu_vs_nxv1i64_nxv1i32_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -631,7 +631,7 @@ define @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv1i32_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv1i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -654,7 +654,7 @@ define @intrinsic_vwredsumu_vs_nxv1i64_nxv2i32_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv2i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10 ; CHECK-NEXT: ret entry: @@ -677,7 +677,7 @@ define @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv2i32_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv2i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t ; CHECK-NEXT: ret entry: @@ -700,7 +700,7 @@ define @intrinsic_vwredsumu_vs_nxv1i64_nxv4i32_nxv1i64( %0, %1, %2, i64 
%3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv4i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v10, v9 ; CHECK-NEXT: ret entry: @@ -723,7 +723,7 @@ define @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv4i32_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv4i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v10, v9, v0.t ; CHECK-NEXT: ret entry: @@ -746,7 +746,7 @@ define @intrinsic_vwredsumu_vs_nxv1i64_nxv8i32_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv8i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v12, v9 ; CHECK-NEXT: ret entry: @@ -769,7 +769,7 @@ define @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv8i32_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv8i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v12, v9, v0.t ; CHECK-NEXT: ret entry: @@ -792,7 +792,7 @@ define @intrinsic_vwredsumu_vs_nxv1i64_nxv16i32_nxv1i64( %0, %1, %2, i64 %3) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv16i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v16, v9 ; CHECK-NEXT: ret entry: @@ -815,7 +815,7 @@ define @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv16i32_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv16i32_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma ; CHECK-NEXT: vwredsumu.vs v8, v16, v9, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vwsub.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -58,7 +58,7 @@ define @intrinsic_vwsub_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vwsub.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -106,7 +106,7 @@ define @intrinsic_vwsub_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vwsub.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -154,7 +154,7 @@ define @intrinsic_vwsub_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vwsub_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vwsub.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -202,7 +202,7 @@ define @intrinsic_vwsub_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vwsub.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -250,7 +250,7 @@ define @intrinsic_vwsub_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwsub.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -298,7 +298,7 @@ define @intrinsic_vwsub_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vwsub.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -346,7 +346,7 @@ define @intrinsic_vwsub_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vwsub.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -394,7 +394,7 @@ define @intrinsic_vwsub_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vwsub.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -442,7 +442,7 @@ define @intrinsic_vwsub_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vwsub.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -490,7 +490,7 @@ define @intrinsic_vwsub_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwsub.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -538,7 +538,7 @@ define @intrinsic_vwsub_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vwsub.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -586,7 +586,7 @@ define @intrinsic_vwsub_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vwsub.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: 
ret @@ -634,7 +634,7 @@ define @intrinsic_vwsub_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vwsub.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -682,7 +682,7 @@ define @intrinsic_vwsub_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vwsub.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -730,7 +730,7 @@ define @intrinsic_vwsub_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vwsub.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -778,7 +778,7 @@ define @intrinsic_vwsub_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vwsub.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -826,7 +826,7 @@ define @intrinsic_vwsub_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vwsub.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -874,7 +874,7 @@ define @intrinsic_vwsub_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vwsub.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -922,7 +922,7 @@ define @intrinsic_vwsub_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vwsub.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -970,7 +970,7 @@ define @intrinsic_vwsub_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vwsub.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1018,7 +1018,7 @@ define @intrinsic_vwsub_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vwsub.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1066,7 +1066,7 @@ define @intrinsic_vwsub_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vwsub.vx v9, v8, 
a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1114,7 +1114,7 @@ define @intrinsic_vwsub_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vwsub.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1162,7 +1162,7 @@ define @intrinsic_vwsub_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vwsub.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1210,7 +1210,7 @@ define @intrinsic_vwsub_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vwsub.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1258,7 +1258,7 @@ define @intrinsic_vwsub_vx_nxv1i64_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vwsub.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1306,7 +1306,7 @@ define @intrinsic_vwsub_vx_nxv2i64_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vwsub.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1354,7 +1354,7 @@ define @intrinsic_vwsub_vx_nxv4i64_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vwsub.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1402,7 +1402,7 @@ define @intrinsic_vwsub_vx_nxv8i64_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vwsub.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vwsub.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -58,7 +58,7 @@ define @intrinsic_vwsub_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vwsub.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -106,7 +106,7 @@ define 
@intrinsic_vwsub_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vwsub.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -154,7 +154,7 @@ define @intrinsic_vwsub_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vwsub.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -202,7 +202,7 @@ define @intrinsic_vwsub_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vwsub.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -250,7 +250,7 @@ define @intrinsic_vwsub_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwsub.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -298,7 +298,7 @@ define @intrinsic_vwsub_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vwsub.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -346,7 +346,7 @@ define @intrinsic_vwsub_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vwsub.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -394,7 +394,7 @@ define @intrinsic_vwsub_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vwsub.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -442,7 +442,7 @@ define @intrinsic_vwsub_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vwsub.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -490,7 +490,7 @@ define @intrinsic_vwsub_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwsub.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -538,7 +538,7 @@ define @intrinsic_vwsub_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, 
ma ; CHECK-NEXT: vwsub.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -586,7 +586,7 @@ define @intrinsic_vwsub_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vwsub.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -634,7 +634,7 @@ define @intrinsic_vwsub_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vwsub.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -682,7 +682,7 @@ define @intrinsic_vwsub_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vwsub.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -730,7 +730,7 @@ define @intrinsic_vwsub_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vwsub.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -778,7 +778,7 @@ define @intrinsic_vwsub_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vwsub.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -826,7 +826,7 @@ define @intrinsic_vwsub_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vwsub.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -874,7 +874,7 @@ define @intrinsic_vwsub_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vwsub.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -922,7 +922,7 @@ define @intrinsic_vwsub_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vwsub.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -970,7 +970,7 @@ define @intrinsic_vwsub_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vwsub.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1018,7 +1018,7 @@ define @intrinsic_vwsub_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, 
mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vwsub.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1066,7 +1066,7 @@ define @intrinsic_vwsub_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vwsub.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1114,7 +1114,7 @@ define @intrinsic_vwsub_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vwsub.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1162,7 +1162,7 @@ define @intrinsic_vwsub_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vwsub.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1210,7 +1210,7 @@ define @intrinsic_vwsub_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vwsub.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1258,7 +1258,7 @@ define @intrinsic_vwsub_vx_nxv1i64_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vwsub.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1306,7 +1306,7 @@ define @intrinsic_vwsub_vx_nxv2i64_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vwsub.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1354,7 +1354,7 @@ define @intrinsic_vwsub_vx_nxv4i64_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vwsub.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1402,7 +1402,7 @@ define @intrinsic_vwsub_vx_nxv8i64_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vwsub.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwsub-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsub-sdnode.ll @@ -5,7 +5,7 @@ define @vwsub_vv_nxv1i64( %va, %vb) { ; CHECK-LABEL: vwsub_vv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vwsub.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -18,7 
+18,7 @@ define @vwsubu_vv_nxv1i64( %va, %vb) { ; CHECK-LABEL: vwsubu_vv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vwsubu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -31,7 +31,7 @@ define @vwsub_vx_nxv1i64( %va, i32 %b) { ; CHECK-LABEL: vwsub_vx_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vwsub.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -46,7 +46,7 @@ define @vwsubu_vx_nxv1i64( %va, i32 %b) { ; CHECK-LABEL: vwsubu_vx_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vwsubu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -61,7 +61,7 @@ define @vwsub_wv_nxv1i64( %va, %vb) { ; CHECK-LABEL: vwsub_wv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vwsub.wv v8, v8, v9 ; CHECK-NEXT: ret %vc = sext %vb to @@ -72,7 +72,7 @@ define @vwsubu_wv_nxv1i64( %va, %vb) { ; CHECK-LABEL: vwsubu_wv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vwsubu.wv v8, v8, v9 ; CHECK-NEXT: ret %vc = zext %vb to @@ -83,7 +83,7 @@ define @vwsub_wx_nxv1i64( %va, i32 %b) { ; CHECK-LABEL: vwsub_wx_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vwsub.wx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -96,7 +96,7 @@ define @vwsubu_wx_nxv1i64( %va, i32 %b) { ; CHECK-LABEL: vwsubu_wx_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vwsubu.wx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -109,7 +109,7 @@ define @vwsub_vv_nxv2i64( %va, %vb) { ; CHECK-LABEL: vwsub_vv_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vwsub.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -122,7 +122,7 @@ define @vwsubu_vv_nxv2i64( %va, %vb) { ; CHECK-LABEL: vwsubu_vv_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vwsubu.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -135,7 +135,7 @@ define @vwsub_vx_nxv2i64( %va, i32 %b) { ; CHECK-LABEL: vwsub_vx_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vwsub.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -150,7 +150,7 @@ define @vwsubu_vx_nxv2i64( %va, i32 %b) { ; CHECK-LABEL: vwsubu_vx_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vwsubu.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -165,7 +165,7 @@ define @vwsub_wv_nxv2i64( %va, %vb) { ; CHECK-LABEL: vwsub_wv_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vwsub.wv v8, v8, v10 ; CHECK-NEXT: ret %vc = sext %vb to @@ -176,7 +176,7 @@ define @vwsubu_wv_nxv2i64( %va, %vb) { ; 
CHECK-LABEL: vwsubu_wv_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vwsubu.wv v8, v8, v10 ; CHECK-NEXT: ret %vc = zext %vb to @@ -187,7 +187,7 @@ define @vwsub_wx_nxv2i64( %va, i32 %b) { ; CHECK-LABEL: vwsub_wx_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vwsub.wx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -200,7 +200,7 @@ define @vwsubu_wx_nxv2i64( %va, i32 %b) { ; CHECK-LABEL: vwsubu_wx_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vwsubu.wx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -213,7 +213,7 @@ define @vwsub_vv_nxv4i64( %va, %vb) { ; CHECK-LABEL: vwsub_vv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vwsub.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -226,7 +226,7 @@ define @vwsubu_vv_nxv4i64( %va, %vb) { ; CHECK-LABEL: vwsubu_vv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vwsubu.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -239,7 +239,7 @@ define @vwsub_vx_nxv4i64( %va, i32 %b) { ; CHECK-LABEL: vwsub_vx_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vwsub.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -254,7 +254,7 @@ define @vwsubu_vx_nxv4i64( %va, i32 %b) { ; CHECK-LABEL: vwsubu_vx_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vwsubu.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -269,7 +269,7 @@ define @vwsub_wv_nxv4i64( %va, %vb) { ; CHECK-LABEL: vwsub_wv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vwsub.wv v8, v8, v12 ; CHECK-NEXT: ret %vc = sext %vb to @@ -280,7 +280,7 @@ define @vwsubu_wv_nxv4i64( %va, %vb) { ; CHECK-LABEL: vwsubu_wv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vwsubu.wv v8, v8, v12 ; CHECK-NEXT: ret %vc = zext %vb to @@ -291,7 +291,7 @@ define @vwsub_wx_nxv4i64( %va, i32 %b) { ; CHECK-LABEL: vwsub_wx_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vwsub.wx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -304,7 +304,7 @@ define @vwsubu_wx_nxv4i64( %va, i32 %b) { ; CHECK-LABEL: vwsubu_wx_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vwsubu.wx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -317,7 +317,7 @@ define @vwsub_vv_nxv8i64( %va, %vb) { ; CHECK-LABEL: vwsub_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vwsub.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -330,7 +330,7 @@ define @vwsubu_vv_nxv8i64( %va, %vb) { ; CHECK-LABEL: 
vwsubu_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vwsubu.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -343,7 +343,7 @@ define @vwsub_vx_nxv8i64( %va, i32 %b) { ; CHECK-LABEL: vwsub_vx_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vwsub.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -358,7 +358,7 @@ define @vwsubu_vx_nxv8i64( %va, i32 %b) { ; CHECK-LABEL: vwsubu_vx_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vwsubu.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -373,7 +373,7 @@ define @vwsub_wv_nxv8i64( %va, %vb) { ; CHECK-LABEL: vwsub_wv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vwsub.wv v8, v8, v16 ; CHECK-NEXT: ret %vc = sext %vb to @@ -384,7 +384,7 @@ define @vwsubu_wv_nxv8i64( %va, %vb) { ; CHECK-LABEL: vwsubu_wv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vwsubu.wv v8, v8, v16 ; CHECK-NEXT: ret %vc = zext %vb to @@ -395,7 +395,7 @@ define @vwsub_wx_nxv8i64( %va, i32 %b) { ; CHECK-LABEL: vwsub_wx_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vwsub.wx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -408,7 +408,7 @@ define @vwsubu_wx_nxv8i64( %va, i32 %b) { ; CHECK-LABEL: vwsubu_wx_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vwsubu.wx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vwsub.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vwsub.w_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vwsub.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vwsub.w_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vwsub.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vwsub.w_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vwsub.wv v8, v8, v10 ; CHECK-NEXT: ret 
entry: @@ -198,7 +198,7 @@ define @intrinsic_vwsub.w_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vwsub.wv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vwsub.w_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwsub.wv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -293,7 +293,7 @@ define @intrinsic_vwsub.w_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vwsub.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vwsub.w_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vwsub.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vwsub.w_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vwsub.wv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vwsub.w_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vwsub.wv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vwsub.w_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwsub.wv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -529,7 +529,7 @@ define @intrinsic_vwsub.w_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vwsub.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -576,7 +576,7 @@ define @intrinsic_vwsub.w_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vwsub.wv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vwsub.w_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vwsub.wv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define 
@intrinsic_vwsub.w_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vwsub.wv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -718,7 +718,7 @@ define @intrinsic_vwsub.w_wx_nxv1i16_nxv1i16_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vwsub.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -765,7 +765,7 @@ define @intrinsic_vwsub.w_wx_nxv2i16_nxv2i16_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vwsub.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -812,7 +812,7 @@ define @intrinsic_vwsub.w_wx_nxv4i16_nxv4i16_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vwsub.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vwsub.w_wx_nxv8i16_nxv8i16_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vwsub.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vwsub.w_wx_nxv16i16_nxv16i16_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vwsub.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vwsub.w_wx_nxv32i16_nxv32i16_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vwsub.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vwsub.w_wx_nxv1i32_nxv1i32_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vwsub.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1047,7 +1047,7 @@ define @intrinsic_vwsub.w_wx_nxv2i32_nxv2i32_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vwsub.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1094,7 +1094,7 @@ define @intrinsic_vwsub.w_wx_nxv4i32_nxv4i32_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vwsub.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1141,7 +1141,7 @@ define @intrinsic_vwsub.w_wx_nxv8i32_nxv8i32_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv8i32_nxv8i32_i16: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vwsub.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1188,7 +1188,7 @@ define @intrinsic_vwsub.w_wx_nxv16i32_nxv16i32_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vwsub.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1235,7 +1235,7 @@ define @intrinsic_vwsub.w_wx_nxv1i64_nxv1i64_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vwsub.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1282,7 +1282,7 @@ define @intrinsic_vwsub.w_wx_nxv2i64_nxv2i64_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vwsub.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1329,7 +1329,7 @@ define @intrinsic_vwsub.w_wx_nxv4i64_nxv4i64_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vwsub.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1376,7 +1376,7 @@ define @intrinsic_vwsub.w_wx_nxv8i64_nxv8i64_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vwsub.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1927,7 +1927,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv1i16_nxv1i16_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vwsub.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1944,7 +1944,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv2i16_nxv2i16_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vwsub.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1961,7 +1961,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv4i16_nxv4i16_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vwsub.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1978,7 +1978,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv8i16_nxv8i16_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vwsub.wv v12, v10, v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -1995,7 +1995,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv16i16_nxv16i16_nxv16i8( %0, %1, i32 %2) nounwind { ; 
CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vwsub.wv v16, v12, v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -2012,7 +2012,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv32i16_nxv32i16_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwsub.wv v24, v16, v8 ; CHECK-NEXT: vmv8r.v v8, v24 ; CHECK-NEXT: ret @@ -2029,7 +2029,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv1i32_nxv1i32_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vwsub.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2046,7 +2046,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv2i32_nxv2i32_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vwsub.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2063,7 +2063,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv4i32_nxv4i32_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vwsub.wv v12, v10, v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -2080,7 +2080,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv8i32_nxv8i32_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vwsub.wv v16, v12, v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -2097,7 +2097,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv1i64_nxv1i64_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vwsub.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2114,7 +2114,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv2i64_nxv2i64_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vwsub.wv v12, v10, v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -2131,7 +2131,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv4i64_nxv4i64_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vwsub.wv v16, v12, v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -2148,7 +2148,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv8i64_nxv8i64_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vwsub.wv v24, v16, v8 ; CHECK-NEXT: vmv8r.v v8, v24 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vwsub.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vwsub.w_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vwsub.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vwsub.w_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vwsub.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vwsub.w_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vwsub.wv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vwsub.w_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vwsub.wv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vwsub.w_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwsub.wv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -293,7 +293,7 @@ define @intrinsic_vwsub.w_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vwsub.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vwsub.w_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vwsub.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vwsub.w_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vwsub.wv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define 
@intrinsic_vwsub.w_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vwsub.wv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vwsub.w_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwsub.wv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -529,7 +529,7 @@ define @intrinsic_vwsub.w_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vwsub.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -576,7 +576,7 @@ define @intrinsic_vwsub.w_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vwsub.wv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vwsub.w_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vwsub.wv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vwsub.w_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vwsub.wv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -718,7 +718,7 @@ define @intrinsic_vwsub.w_wx_nxv1i16_nxv1i16_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vwsub.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -765,7 +765,7 @@ define @intrinsic_vwsub.w_wx_nxv2i16_nxv2i16_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vwsub.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -812,7 +812,7 @@ define @intrinsic_vwsub.w_wx_nxv4i16_nxv4i16_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vwsub.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vwsub.w_wx_nxv8i16_nxv8i16_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vwsub.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vwsub.w_wx_nxv16i16_nxv16i16_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vwsub.w_wx_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vwsub.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vwsub.w_wx_nxv32i16_nxv32i16_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vwsub.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vwsub.w_wx_nxv1i32_nxv1i32_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vwsub.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1047,7 +1047,7 @@ define @intrinsic_vwsub.w_wx_nxv2i32_nxv2i32_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vwsub.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1094,7 +1094,7 @@ define @intrinsic_vwsub.w_wx_nxv4i32_nxv4i32_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vwsub.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1141,7 +1141,7 @@ define @intrinsic_vwsub.w_wx_nxv8i32_nxv8i32_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vwsub.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1188,7 +1188,7 @@ define @intrinsic_vwsub.w_wx_nxv16i32_nxv16i32_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vwsub.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1235,7 +1235,7 @@ define @intrinsic_vwsub.w_wx_nxv1i64_nxv1i64_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vwsub.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1282,7 +1282,7 @@ define @intrinsic_vwsub.w_wx_nxv2i64_nxv2i64_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vwsub.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1329,7 +1329,7 @@ define @intrinsic_vwsub.w_wx_nxv4i64_nxv4i64_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vwsub.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1376,7 +1376,7 @@ define @intrinsic_vwsub.w_wx_nxv8i64_nxv8i64_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, 
m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vwsub.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1927,7 +1927,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv1i16_nxv1i16_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vwsub.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1944,7 +1944,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv2i16_nxv2i16_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vwsub.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1961,7 +1961,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv4i16_nxv4i16_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vwsub.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1978,7 +1978,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv8i16_nxv8i16_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vwsub.wv v12, v10, v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -1995,7 +1995,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv16i16_nxv16i16_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vwsub.wv v16, v12, v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -2012,7 +2012,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv32i16_nxv32i16_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwsub.wv v24, v16, v8 ; CHECK-NEXT: vmv8r.v v8, v24 ; CHECK-NEXT: ret @@ -2029,7 +2029,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv1i32_nxv1i32_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vwsub.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2046,7 +2046,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv2i32_nxv2i32_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vwsub.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2063,7 +2063,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv4i32_nxv4i32_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vwsub.wv v12, v10, v8 ; CHECK-NEXT: vmv2r.v v8, 
v12 ; CHECK-NEXT: ret @@ -2080,7 +2080,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv8i32_nxv8i32_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vwsub.wv v16, v12, v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -2097,7 +2097,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv1i64_nxv1i64_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vwsub.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2114,7 +2114,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv2i64_nxv2i64_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vwsub.wv v12, v10, v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -2131,7 +2131,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv4i64_nxv4i64_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vwsub.wv v16, v12, v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -2148,7 +2148,7 @@ define @intrinsic_vwsub.w_wv_untie_nxv8i64_nxv8i64_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_wv_untie_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vwsub.wv v24, v16, v8 ; CHECK-NEXT: vmv8r.v v8, v24 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vwsubu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -58,7 +58,7 @@ define @intrinsic_vwsubu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vwsubu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -106,7 +106,7 @@ define @intrinsic_vwsubu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vwsubu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -154,7 +154,7 @@ define @intrinsic_vwsubu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vwsubu.vv v10, v8, v9 ; 
CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -202,7 +202,7 @@ define @intrinsic_vwsubu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vwsubu.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -250,7 +250,7 @@ define @intrinsic_vwsubu_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwsubu.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -298,7 +298,7 @@ define @intrinsic_vwsubu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vwsubu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -346,7 +346,7 @@ define @intrinsic_vwsubu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vwsubu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -394,7 +394,7 @@ define @intrinsic_vwsubu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vwsubu.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -442,7 +442,7 @@ define @intrinsic_vwsubu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vwsubu.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -490,7 +490,7 @@ define @intrinsic_vwsubu_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwsubu.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -538,7 +538,7 @@ define @intrinsic_vwsubu_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vwsubu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -586,7 +586,7 @@ define @intrinsic_vwsubu_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vwsubu.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -634,7 +634,7 @@ define @intrinsic_vwsubu_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vwsubu.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -682,7 +682,7 @@ define @intrinsic_vwsubu_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vwsubu.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -730,7 +730,7 @@ define @intrinsic_vwsubu_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vwsubu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -778,7 +778,7 @@ define @intrinsic_vwsubu_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vwsubu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -826,7 +826,7 @@ define @intrinsic_vwsubu_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vwsubu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -874,7 +874,7 @@ define @intrinsic_vwsubu_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vwsubu.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -922,7 +922,7 @@ define @intrinsic_vwsubu_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vwsubu.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -970,7 +970,7 @@ define @intrinsic_vwsubu_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vwsubu.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1018,7 +1018,7 @@ define @intrinsic_vwsubu_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vwsubu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1066,7 +1066,7 @@ define @intrinsic_vwsubu_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vwsubu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1114,7 +1114,7 @@ define @intrinsic_vwsubu_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, i32 %2) 
nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vwsubu.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1162,7 +1162,7 @@ define @intrinsic_vwsubu_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vwsubu.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1210,7 +1210,7 @@ define @intrinsic_vwsubu_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vwsubu.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1258,7 +1258,7 @@ define @intrinsic_vwsubu_vx_nxv1i64_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vwsubu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1306,7 +1306,7 @@ define @intrinsic_vwsubu_vx_nxv2i64_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vwsubu.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1354,7 +1354,7 @@ define @intrinsic_vwsubu_vx_nxv4i64_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vwsubu.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1402,7 +1402,7 @@ define @intrinsic_vwsubu_vx_nxv8i64_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vwsubu.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vwsubu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -58,7 +58,7 @@ define @intrinsic_vwsubu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv2i16_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vwsubu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -106,7 +106,7 @@ define @intrinsic_vwsubu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vwsubu_vv_nxv4i16_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vwsubu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -154,7 +154,7 @@ define @intrinsic_vwsubu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv8i16_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vwsubu.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -202,7 +202,7 @@ define @intrinsic_vwsubu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv16i16_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vwsubu.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -250,7 +250,7 @@ define @intrinsic_vwsubu_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv32i16_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwsubu.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -298,7 +298,7 @@ define @intrinsic_vwsubu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv1i32_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vwsubu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -346,7 +346,7 @@ define @intrinsic_vwsubu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv2i32_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vwsubu.vv v10, v8, v9 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -394,7 +394,7 @@ define @intrinsic_vwsubu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv4i32_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vwsubu.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -442,7 +442,7 @@ define @intrinsic_vwsubu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv8i32_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vwsubu.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -490,7 +490,7 @@ define @intrinsic_vwsubu_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv16i32_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwsubu.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -538,7 +538,7 @@ define @intrinsic_vwsubu_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv1i64_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vwsubu.vv v10, v8, v9 ; CHECK-NEXT: 
vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -586,7 +586,7 @@ define @intrinsic_vwsubu_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv2i64_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vwsubu.vv v10, v8, v9 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -634,7 +634,7 @@ define @intrinsic_vwsubu_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv4i64_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vwsubu.vv v12, v8, v10 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -682,7 +682,7 @@ define @intrinsic_vwsubu_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vv_nxv8i64_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vwsubu.vv v16, v8, v12 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -730,7 +730,7 @@ define @intrinsic_vwsubu_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv1i16_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vwsubu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -778,7 +778,7 @@ define @intrinsic_vwsubu_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv2i16_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vwsubu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -826,7 +826,7 @@ define @intrinsic_vwsubu_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv4i16_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vwsubu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -874,7 +874,7 @@ define @intrinsic_vwsubu_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv8i16_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vwsubu.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -922,7 +922,7 @@ define @intrinsic_vwsubu_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv16i16_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vwsubu.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -970,7 +970,7 @@ define @intrinsic_vwsubu_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv32i16_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vwsubu.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1018,7 +1018,7 @@ define @intrinsic_vwsubu_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv1i32_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: 
vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vwsubu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1066,7 +1066,7 @@ define @intrinsic_vwsubu_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv2i32_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vwsubu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1114,7 +1114,7 @@ define @intrinsic_vwsubu_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv4i32_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vwsubu.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1162,7 +1162,7 @@ define @intrinsic_vwsubu_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv8i32_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vwsubu.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1210,7 +1210,7 @@ define @intrinsic_vwsubu_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv16i32_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vwsubu.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret @@ -1258,7 +1258,7 @@ define @intrinsic_vwsubu_vx_nxv1i64_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv1i64_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vwsubu.vx v9, v8, a0 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1306,7 +1306,7 @@ define @intrinsic_vwsubu_vx_nxv2i64_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv2i64_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vwsubu.vx v10, v8, a0 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -1354,7 +1354,7 @@ define @intrinsic_vwsubu_vx_nxv4i64_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv4i64_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vwsubu.vx v12, v8, a0 ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret @@ -1402,7 +1402,7 @@ define @intrinsic_vwsubu_vx_nxv8i64_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu_vx_nxv8i64_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vwsubu.vx v16, v8, a0 ; CHECK-NEXT: vmv8r.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; 
CHECK-NEXT: vwsubu.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vwsubu.w_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vwsubu.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vwsubu.w_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vwsubu.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vwsubu.w_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vwsubu.wv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vwsubu.w_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vwsubu.wv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vwsubu.w_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwsubu.wv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -293,7 +293,7 @@ define @intrinsic_vwsubu.w_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vwsubu.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vwsubu.w_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vwsubu.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vwsubu.w_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vwsubu.wv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vwsubu.w_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vwsubu.wv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vwsubu.w_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwsubu.wv v8, v8, v16 ; CHECK-NEXT: ret 
entry: @@ -529,7 +529,7 @@ define @intrinsic_vwsubu.w_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vwsubu.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -576,7 +576,7 @@ define @intrinsic_vwsubu.w_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vwsubu.wv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vwsubu.w_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vwsubu.wv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vwsubu.w_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vwsubu.wv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -718,7 +718,7 @@ define @intrinsic_vwsubu.w_wx_nxv1i16_nxv1i16_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vwsubu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -765,7 +765,7 @@ define @intrinsic_vwsubu.w_wx_nxv2i16_nxv2i16_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vwsubu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -812,7 +812,7 @@ define @intrinsic_vwsubu.w_wx_nxv4i16_nxv4i16_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vwsubu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vwsubu.w_wx_nxv8i16_nxv8i16_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vwsubu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vwsubu.w_wx_nxv16i16_nxv16i16_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vwsubu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vwsubu.w_wx_nxv32i16_nxv32i16_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vwsubu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vwsubu.w_wx_nxv1i32_nxv1i32_i16( %0, i16 %1, i32 %2) 
nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vwsubu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1047,7 +1047,7 @@ define @intrinsic_vwsubu.w_wx_nxv2i32_nxv2i32_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vwsubu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1094,7 +1094,7 @@ define @intrinsic_vwsubu.w_wx_nxv4i32_nxv4i32_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vwsubu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1141,7 +1141,7 @@ define @intrinsic_vwsubu.w_wx_nxv8i32_nxv8i32_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vwsubu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1188,7 +1188,7 @@ define @intrinsic_vwsubu.w_wx_nxv16i32_nxv16i32_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vwsubu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1235,7 +1235,7 @@ define @intrinsic_vwsubu.w_wx_nxv1i64_nxv1i64_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vwsubu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1282,7 +1282,7 @@ define @intrinsic_vwsubu.w_wx_nxv2i64_nxv2i64_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vwsubu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1329,7 +1329,7 @@ define @intrinsic_vwsubu.w_wx_nxv4i64_nxv4i64_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vwsubu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1376,7 +1376,7 @@ define @intrinsic_vwsubu.w_wx_nxv8i64_nxv8i64_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vwsubu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1927,7 +1927,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv1i16_nxv1i16_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vwsubu.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1944,7 +1944,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv2i16_nxv2i16_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vwsubu.w_wv_untie_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vwsubu.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1961,7 +1961,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv4i16_nxv4i16_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vwsubu.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1978,7 +1978,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv8i16_nxv8i16_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vwsubu.wv v12, v10, v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -1995,7 +1995,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv16i16_nxv16i16_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vwsubu.wv v16, v12, v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -2012,7 +2012,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv32i16_nxv32i16_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwsubu.wv v24, v16, v8 ; CHECK-NEXT: vmv8r.v v8, v24 ; CHECK-NEXT: ret @@ -2029,7 +2029,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv1i32_nxv1i32_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vwsubu.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2046,7 +2046,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv2i32_nxv2i32_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vwsubu.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2063,7 +2063,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv4i32_nxv4i32_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vwsubu.wv v12, v10, v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -2080,7 +2080,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv8i32_nxv8i32_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vwsubu.wv v16, v12, v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -2097,7 +2097,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv1i64_nxv1i64_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vwsubu.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2114,7 +2114,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv2i64_nxv2i64_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vwsubu.wv v12, v10, v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -2131,7 +2131,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv4i64_nxv4i64_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vwsubu.wv v16, v12, v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -2148,7 +2148,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv8i64_nxv8i64_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vwsubu.wv v24, v16, v8 ; CHECK-NEXT: vmv8r.v v8, v24 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vwsubu.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vwsubu.w_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vwsubu.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vwsubu.w_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vwsubu.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vwsubu.w_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vwsubu.wv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vwsubu.w_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vwsubu.wv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vwsubu.w_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; 
CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwsubu.wv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -293,7 +293,7 @@ define @intrinsic_vwsubu.w_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vwsubu.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vwsubu.w_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vwsubu.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vwsubu.w_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vwsubu.wv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vwsubu.w_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vwsubu.wv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vwsubu.w_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vwsubu.wv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -529,7 +529,7 @@ define @intrinsic_vwsubu.w_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vwsubu.wv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -576,7 +576,7 @@ define @intrinsic_vwsubu.w_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vwsubu.wv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vwsubu.w_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vwsubu.wv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vwsubu.w_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vwsubu.wv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -718,7 +718,7 @@ define @intrinsic_vwsubu.w_wx_nxv1i16_nxv1i16_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv1i16_nxv1i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; 
CHECK-NEXT: vwsubu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -765,7 +765,7 @@ define @intrinsic_vwsubu.w_wx_nxv2i16_nxv2i16_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv2i16_nxv2i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vwsubu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -812,7 +812,7 @@ define @intrinsic_vwsubu.w_wx_nxv4i16_nxv4i16_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv4i16_nxv4i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vwsubu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vwsubu.w_wx_nxv8i16_nxv8i16_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv8i16_nxv8i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vwsubu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vwsubu.w_wx_nxv16i16_nxv16i16_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv16i16_nxv16i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vwsubu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vwsubu.w_wx_nxv32i16_nxv32i16_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv32i16_nxv32i16_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vwsubu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vwsubu.w_wx_nxv1i32_nxv1i32_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv1i32_nxv1i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vwsubu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1047,7 +1047,7 @@ define @intrinsic_vwsubu.w_wx_nxv2i32_nxv2i32_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv2i32_nxv2i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vwsubu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1094,7 +1094,7 @@ define @intrinsic_vwsubu.w_wx_nxv4i32_nxv4i32_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv4i32_nxv4i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vwsubu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1141,7 +1141,7 @@ define @intrinsic_vwsubu.w_wx_nxv8i32_nxv8i32_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv8i32_nxv8i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vwsubu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1188,7 +1188,7 @@ define @intrinsic_vwsubu.w_wx_nxv16i32_nxv16i32_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv16i32_nxv16i32_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vwsubu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1235,7 +1235,7 @@ define 
@intrinsic_vwsubu.w_wx_nxv1i64_nxv1i64_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv1i64_nxv1i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vwsubu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1282,7 +1282,7 @@ define @intrinsic_vwsubu.w_wx_nxv2i64_nxv2i64_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv2i64_nxv2i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vwsubu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1329,7 +1329,7 @@ define @intrinsic_vwsubu.w_wx_nxv4i64_nxv4i64_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv4i64_nxv4i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vwsubu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1376,7 +1376,7 @@ define @intrinsic_vwsubu.w_wx_nxv8i64_nxv8i64_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv8i64_nxv8i64_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vwsubu.wx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1927,7 +1927,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv1i16_nxv1i16_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv1i16_nxv1i16_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vwsubu.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1944,7 +1944,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv2i16_nxv2i16_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv2i16_nxv2i16_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vwsubu.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1961,7 +1961,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv4i16_nxv4i16_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv4i16_nxv4i16_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vwsubu.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -1978,7 +1978,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv8i16_nxv8i16_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv8i16_nxv8i16_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vwsubu.wv v12, v10, v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -1995,7 +1995,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv16i16_nxv16i16_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv16i16_nxv16i16_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vwsubu.wv v16, v12, v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -2012,7 +2012,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv32i16_nxv32i16_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; 
CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vwsubu.wv v24, v16, v8 ; CHECK-NEXT: vmv8r.v v8, v24 ; CHECK-NEXT: ret @@ -2029,7 +2029,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv1i32_nxv1i32_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv1i32_nxv1i32_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vwsubu.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2046,7 +2046,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv2i32_nxv2i32_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv2i32_nxv2i32_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vwsubu.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2063,7 +2063,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv4i32_nxv4i32_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv4i32_nxv4i32_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vwsubu.wv v12, v10, v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -2080,7 +2080,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv8i32_nxv8i32_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv8i32_nxv8i32_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vwsubu.wv v16, v12, v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -2097,7 +2097,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv1i64_nxv1i64_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv1i64_nxv1i64_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vwsubu.wv v10, v9, v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret @@ -2114,7 +2114,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv2i64_nxv2i64_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv2i64_nxv2i64_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vwsubu.wv v12, v10, v8 ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret @@ -2131,7 +2131,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv4i64_nxv4i64_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv4i64_nxv4i64_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vwsubu.wv v16, v12, v8 ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret @@ -2148,7 +2148,7 @@ define @intrinsic_vwsubu.w_wv_untie_nxv8i64_nxv8i64_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vwsubu.wv v24, v16, v8 ; CHECK-NEXT: vmv8r.v v8, v24 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll @@ -10,7 +10,7 @@ define @intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: 
intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vxor_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vxor_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vxor_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vxor_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vxor_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vxor_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vxor_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vxor_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vxor_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vxor_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: 
vxor.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vxor_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vxor_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vxor_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vxor_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vxor_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vxor_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vxor_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vxor_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vxor_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vxor_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vxor_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i32 %2) 
nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vxor_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1095,7 +1095,7 @@ define @intrinsic_vxor_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vxor_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vxor_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vxor_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define @intrinsic_vxor_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vxor_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vxor_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vxor_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vxor_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, 
a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define @intrinsic_vxor_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vxor_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vxor_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vxor_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vxor_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ define @intrinsic_vxor_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ define @intrinsic_vxor_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vxor_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1898,7 +1898,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; CHECK-NEXT: vlse64.v v9, (a0), zero ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: addi sp, sp, 16 @@ -1957,7 +1957,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vxor.vv v8, v8, v10 ; CHECK-NEXT: addi sp, sp, 16 @@ -2016,7 +2016,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma 
; CHECK-NEXT: vlse64.v v12, (a0), zero ; CHECK-NEXT: vxor.vv v8, v8, v12 ; CHECK-NEXT: addi sp, sp, 16 @@ -2075,7 +2075,7 @@ ; CHECK-NEXT: sw a1, 12(sp) ; CHECK-NEXT: sw a0, 8(sp) ; CHECK-NEXT: addi a0, sp, 8 -; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; CHECK-NEXT: vlse64.v v16, (a0), zero ; CHECK-NEXT: vxor.vv v8, v8, v16 ; CHECK-NEXT: addi sp, sp, 16 @@ -2124,7 +2124,7 @@ define @intrinsic_vxor_vi_nxv1i8_nxv1i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2157,7 +2157,7 @@ define @intrinsic_vxor_vi_nxv2i8_nxv2i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2190,7 +2190,7 @@ define @intrinsic_vxor_vi_nxv4i8_nxv4i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2223,7 +2223,7 @@ define @intrinsic_vxor_vi_nxv8i8_nxv8i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2256,7 +2256,7 @@ define @intrinsic_vxor_vi_nxv16i8_nxv16i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2289,7 +2289,7 @@ define @intrinsic_vxor_vi_nxv32i8_nxv32i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2322,7 +2322,7 @@ define @intrinsic_vxor_vi_nxv64i8_nxv64i8_i8( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2355,7 +2355,7 @@ define @intrinsic_vxor_vi_nxv1i16_nxv1i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2388,7 +2388,7 @@ define @intrinsic_vxor_vi_nxv2i16_nxv2i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2421,7 +2421,7 @@ define @intrinsic_vxor_vi_nxv4i16_nxv4i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; 
CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2454,7 +2454,7 @@ define @intrinsic_vxor_vi_nxv8i16_nxv8i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2487,7 +2487,7 @@ define @intrinsic_vxor_vi_nxv16i16_nxv16i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2520,7 +2520,7 @@ define @intrinsic_vxor_vi_nxv32i16_nxv32i16_i16( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2553,7 +2553,7 @@ define @intrinsic_vxor_vi_nxv1i32_nxv1i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2586,7 +2586,7 @@ define @intrinsic_vxor_vi_nxv2i32_nxv2i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2619,7 +2619,7 @@ define @intrinsic_vxor_vi_nxv4i32_nxv4i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2652,7 +2652,7 @@ define @intrinsic_vxor_vi_nxv8i32_nxv8i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2685,7 +2685,7 @@ define @intrinsic_vxor_vi_nxv16i32_nxv16i32_i32( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2718,7 +2718,7 @@ define @intrinsic_vxor_vi_nxv1i64_nxv1i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2751,7 +2751,7 @@ define @intrinsic_vxor_vi_nxv2i64_nxv2i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2784,7 +2784,7 @@ define @intrinsic_vxor_vi_nxv4i64_nxv4i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv4i64_nxv4i64_i64: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2817,7 +2817,7 @@ define @intrinsic_vxor_vi_nxv8i64_nxv8i64_i64( %0, i32 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll @@ -10,7 +10,7 @@ define @intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv1i8_nxv1i8_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -57,7 +57,7 @@ define @intrinsic_vxor_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv2i8_nxv2i8_nxv2i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -104,7 +104,7 @@ define @intrinsic_vxor_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv4i8_nxv4i8_nxv4i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -151,7 +151,7 @@ define @intrinsic_vxor_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv8i8_nxv8i8_nxv8i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -198,7 +198,7 @@ define @intrinsic_vxor_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv16i8_nxv16i8_nxv16i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -245,7 +245,7 @@ define @intrinsic_vxor_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv32i8_nxv32i8_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -292,7 +292,7 @@ define @intrinsic_vxor_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -340,7 +340,7 @@ define @intrinsic_vxor_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv1i16_nxv1i16_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -387,7 +387,7 @@ define @intrinsic_vxor_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv2i16_nxv2i16_nxv2i16: ; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -434,7 +434,7 @@ define @intrinsic_vxor_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv4i16_nxv4i16_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -481,7 +481,7 @@ define @intrinsic_vxor_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv8i16_nxv8i16_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -528,7 +528,7 @@ define @intrinsic_vxor_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv16i16_nxv16i16_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -575,7 +575,7 @@ define @intrinsic_vxor_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -623,7 +623,7 @@ define @intrinsic_vxor_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv1i32_nxv1i32_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -670,7 +670,7 @@ define @intrinsic_vxor_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv2i32_nxv2i32_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret entry: @@ -717,7 +717,7 @@ define @intrinsic_vxor_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv4i32_nxv4i32_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -764,7 +764,7 @@ define @intrinsic_vxor_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv8i32_nxv8i32_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -811,7 +811,7 @@ define @intrinsic_vxor_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -859,7 +859,7 @@ define @intrinsic_vxor_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv1i64_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; 
CHECK-NEXT: ret entry: @@ -906,7 +906,7 @@ define @intrinsic_vxor_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv2i64_nxv2i64_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v10 ; CHECK-NEXT: ret entry: @@ -953,7 +953,7 @@ define @intrinsic_vxor_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv4i64_nxv4i64_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v12 ; CHECK-NEXT: ret entry: @@ -1000,7 +1000,7 @@ define @intrinsic_vxor_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v16 ; CHECK-NEXT: ret entry: @@ -1048,7 +1048,7 @@ define @intrinsic_vxor_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1095,7 +1095,7 @@ define @intrinsic_vxor_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1142,7 +1142,7 @@ define @intrinsic_vxor_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1189,7 +1189,7 @@ define @intrinsic_vxor_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1236,7 +1236,7 @@ define @intrinsic_vxor_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1283,7 +1283,7 @@ define @intrinsic_vxor_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1330,7 +1330,7 @@ define @intrinsic_vxor_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1377,7 +1377,7 @@ define @intrinsic_vxor_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1424,7 +1424,7 @@ define @intrinsic_vxor_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1471,7 +1471,7 @@ define @intrinsic_vxor_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1518,7 +1518,7 @@ define @intrinsic_vxor_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1565,7 +1565,7 @@ define @intrinsic_vxor_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1612,7 +1612,7 @@ define @intrinsic_vxor_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1659,7 +1659,7 @@ define @intrinsic_vxor_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1706,7 +1706,7 @@ define @intrinsic_vxor_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1753,7 +1753,7 @@ define @intrinsic_vxor_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1800,7 +1800,7 @@ define @intrinsic_vxor_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1847,7 +1847,7 @@ define @intrinsic_vxor_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1894,7 +1894,7 @@ 
define @intrinsic_vxor_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1941,7 +1941,7 @@ define @intrinsic_vxor_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -1988,7 +1988,7 @@ define @intrinsic_vxor_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2035,7 +2035,7 @@ define @intrinsic_vxor_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { ; CHECK-LABEL: intrinsic_vxor_vx_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret entry: @@ -2076,7 +2076,7 @@ define @intrinsic_vxor_vi_nxv1i8_nxv1i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv1i8_nxv1i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2109,7 +2109,7 @@ define @intrinsic_vxor_vi_nxv2i8_nxv2i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv2i8_nxv2i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2142,7 +2142,7 @@ define @intrinsic_vxor_vi_nxv4i8_nxv4i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv4i8_nxv4i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2175,7 +2175,7 @@ define @intrinsic_vxor_vi_nxv8i8_nxv8i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv8i8_nxv8i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2208,7 +2208,7 @@ define @intrinsic_vxor_vi_nxv16i8_nxv16i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv16i8_nxv16i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2241,7 +2241,7 @@ define @intrinsic_vxor_vi_nxv32i8_nxv32i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv32i8_nxv32i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2274,7 +2274,7 @@ define @intrinsic_vxor_vi_nxv64i8_nxv64i8_i8( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv64i8_nxv64i8_i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vxor.vi v8, 
v8, 9 ; CHECK-NEXT: ret entry: @@ -2307,7 +2307,7 @@ define @intrinsic_vxor_vi_nxv1i16_nxv1i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv1i16_nxv1i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2340,7 +2340,7 @@ define @intrinsic_vxor_vi_nxv2i16_nxv2i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv2i16_nxv2i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2373,7 +2373,7 @@ define @intrinsic_vxor_vi_nxv4i16_nxv4i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv4i16_nxv4i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2406,7 +2406,7 @@ define @intrinsic_vxor_vi_nxv8i16_nxv8i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv8i16_nxv8i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2439,7 +2439,7 @@ define @intrinsic_vxor_vi_nxv16i16_nxv16i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv16i16_nxv16i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2472,7 +2472,7 @@ define @intrinsic_vxor_vi_nxv32i16_nxv32i16_i16( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv32i16_nxv32i16_i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2505,7 +2505,7 @@ define @intrinsic_vxor_vi_nxv1i32_nxv1i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv1i32_nxv1i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2538,7 +2538,7 @@ define @intrinsic_vxor_vi_nxv2i32_nxv2i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv2i32_nxv2i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2571,7 +2571,7 @@ define @intrinsic_vxor_vi_nxv4i32_nxv4i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv4i32_nxv4i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2604,7 +2604,7 @@ define @intrinsic_vxor_vi_nxv8i32_nxv8i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv8i32_nxv8i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2637,7 +2637,7 @@ define @intrinsic_vxor_vi_nxv16i32_nxv16i32_i32( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv16i32_nxv16i32_i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; 
CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2670,7 +2670,7 @@ define @intrinsic_vxor_vi_nxv1i64_nxv1i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv1i64_nxv1i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2703,7 +2703,7 @@ define @intrinsic_vxor_vi_nxv2i64_nxv2i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv2i64_nxv2i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2736,7 +2736,7 @@ define @intrinsic_vxor_vi_nxv4i64_nxv4i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv4i64_nxv4i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: @@ -2769,7 +2769,7 @@ define @intrinsic_vxor_vi_nxv8i64_nxv8i64_i64( %0, i64 %1) nounwind { ; CHECK-LABEL: intrinsic_vxor_vi_nxv8i64_nxv8i64_i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 9 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vxor-sdnode.ll @@ -5,7 +5,7 @@ define @vxor_vv_nxv1i8( %va, %vb) { ; CHECK-LABEL: vxor_vv_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = xor %va, %vb @@ -15,7 +15,7 @@ define @vxor_vx_nxv1i8( %va, i8 signext %b) { ; CHECK-LABEL: vxor_vx_nxv1i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -27,7 +27,7 @@ define @vxor_vi_nxv1i8_0( %va) { ; CHECK-LABEL: vxor_vi_nxv1i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i8 -1, i32 0 @@ -39,7 +39,7 @@ define @vxor_vi_nxv1i8_1( %va) { ; CHECK-LABEL: vxor_vi_nxv1i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i8 8, i32 0 @@ -52,7 +52,7 @@ ; CHECK-LABEL: vxor_vi_nxv1i8_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 16, i32 0 @@ -64,7 +64,7 @@ define @vxor_vv_nxv2i8( %va, %vb) { ; CHECK-LABEL: vxor_vv_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = xor %va, %vb @@ -74,7 +74,7 @@ define @vxor_vx_nxv2i8( %va, i8 signext %b) { ; CHECK-LABEL: vxor_vx_nxv2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: 
vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -86,7 +86,7 @@ define @vxor_vi_nxv2i8_0( %va) { ; CHECK-LABEL: vxor_vi_nxv2i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i8 -1, i32 0 @@ -98,7 +98,7 @@ define @vxor_vi_nxv2i8_1( %va) { ; CHECK-LABEL: vxor_vi_nxv2i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i8 8, i32 0 @@ -111,7 +111,7 @@ ; CHECK-LABEL: vxor_vi_nxv2i8_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 16, i32 0 @@ -123,7 +123,7 @@ define @vxor_vv_nxv4i8( %va, %vb) { ; CHECK-LABEL: vxor_vv_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = xor %va, %vb @@ -133,7 +133,7 @@ define @vxor_vx_nxv4i8( %va, i8 signext %b) { ; CHECK-LABEL: vxor_vx_nxv4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -145,7 +145,7 @@ define @vxor_vi_nxv4i8_0( %va) { ; CHECK-LABEL: vxor_vi_nxv4i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i8 -1, i32 0 @@ -157,7 +157,7 @@ define @vxor_vi_nxv4i8_1( %va) { ; CHECK-LABEL: vxor_vi_nxv4i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i8 8, i32 0 @@ -170,7 +170,7 @@ ; CHECK-LABEL: vxor_vi_nxv4i8_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 16, i32 0 @@ -182,7 +182,7 @@ define @vxor_vv_nxv8i8( %va, %vb) { ; CHECK-LABEL: vxor_vv_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = xor %va, %vb @@ -192,7 +192,7 @@ define @vxor_vx_nxv8i8( %va, i8 signext %b) { ; CHECK-LABEL: vxor_vx_nxv8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -204,7 +204,7 @@ define @vxor_vi_nxv8i8_0( %va) { ; CHECK-LABEL: vxor_vi_nxv8i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i8 -1, i32 0 @@ -216,7 +216,7 @@ define @vxor_vi_nxv8i8_1( %va) { ; CHECK-LABEL: vxor_vi_nxv8i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i8 8, i32 
0 @@ -229,7 +229,7 @@ ; CHECK-LABEL: vxor_vi_nxv8i8_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 16, i32 0 @@ -241,7 +241,7 @@ define @vxor_vv_nxv16i8( %va, %vb) { ; CHECK-LABEL: vxor_vv_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = xor %va, %vb @@ -251,7 +251,7 @@ define @vxor_vx_nxv16i8( %va, i8 signext %b) { ; CHECK-LABEL: vxor_vx_nxv16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -263,7 +263,7 @@ define @vxor_vi_nxv16i8_0( %va) { ; CHECK-LABEL: vxor_vi_nxv16i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i8 -1, i32 0 @@ -275,7 +275,7 @@ define @vxor_vi_nxv16i8_1( %va) { ; CHECK-LABEL: vxor_vi_nxv16i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i8 8, i32 0 @@ -288,7 +288,7 @@ ; CHECK-LABEL: vxor_vi_nxv16i8_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 16, i32 0 @@ -300,7 +300,7 @@ define @vxor_vv_nxv32i8( %va, %vb) { ; CHECK-LABEL: vxor_vv_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = xor %va, %vb @@ -310,7 +310,7 @@ define @vxor_vx_nxv32i8( %va, i8 signext %b) { ; CHECK-LABEL: vxor_vx_nxv32i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -322,7 +322,7 @@ define @vxor_vi_nxv32i8_0( %va) { ; CHECK-LABEL: vxor_vi_nxv32i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i8 -1, i32 0 @@ -334,7 +334,7 @@ define @vxor_vi_nxv32i8_1( %va) { ; CHECK-LABEL: vxor_vi_nxv32i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i8 8, i32 0 @@ -347,7 +347,7 @@ ; CHECK-LABEL: vxor_vi_nxv32i8_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 16, i32 0 @@ -359,7 +359,7 @@ define @vxor_vv_nxv64i8( %va, %vb) { ; CHECK-LABEL: vxor_vv_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = xor %va, %vb @@ -369,7 +369,7 @@ define @vxor_vx_nxv64i8( %va, i8 signext %b) { ; CHECK-LABEL: 
vxor_vx_nxv64i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 %b, i32 0 @@ -381,7 +381,7 @@ define @vxor_vi_nxv64i8_0( %va) { ; CHECK-LABEL: vxor_vi_nxv64i8_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i8 -1, i32 0 @@ -393,7 +393,7 @@ define @vxor_vi_nxv64i8_1( %va) { ; CHECK-LABEL: vxor_vi_nxv64i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i8 8, i32 0 @@ -406,7 +406,7 @@ ; CHECK-LABEL: vxor_vi_nxv64i8_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i8 16, i32 0 @@ -418,7 +418,7 @@ define @vxor_vv_nxv1i16( %va, %vb) { ; CHECK-LABEL: vxor_vv_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = xor %va, %vb @@ -428,7 +428,7 @@ define @vxor_vx_nxv1i16( %va, i16 signext %b) { ; CHECK-LABEL: vxor_vx_nxv1i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -440,7 +440,7 @@ define @vxor_vi_nxv1i16_0( %va) { ; CHECK-LABEL: vxor_vi_nxv1i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i16 -1, i32 0 @@ -452,7 +452,7 @@ define @vxor_vi_nxv1i16_1( %va) { ; CHECK-LABEL: vxor_vi_nxv1i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i16 8, i32 0 @@ -465,7 +465,7 @@ ; CHECK-LABEL: vxor_vi_nxv1i16_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 16, i32 0 @@ -477,7 +477,7 @@ define @vxor_vv_nxv2i16( %va, %vb) { ; CHECK-LABEL: vxor_vv_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = xor %va, %vb @@ -487,7 +487,7 @@ define @vxor_vx_nxv2i16( %va, i16 signext %b) { ; CHECK-LABEL: vxor_vx_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -499,7 +499,7 @@ define @vxor_vi_nxv2i16_0( %va) { ; CHECK-LABEL: vxor_vi_nxv2i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i16 -1, i32 0 @@ -511,7 +511,7 @@ define @vxor_vi_nxv2i16_1( %va) { ; CHECK-LABEL: vxor_vi_nxv2i16_1: ; CHECK: 
# %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i16 8, i32 0 @@ -524,7 +524,7 @@ ; CHECK-LABEL: vxor_vi_nxv2i16_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 16, i32 0 @@ -536,7 +536,7 @@ define @vxor_vv_nxv4i16( %va, %vb) { ; CHECK-LABEL: vxor_vv_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = xor %va, %vb @@ -546,7 +546,7 @@ define @vxor_vx_nxv4i16( %va, i16 signext %b) { ; CHECK-LABEL: vxor_vx_nxv4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -558,7 +558,7 @@ define @vxor_vi_nxv4i16_0( %va) { ; CHECK-LABEL: vxor_vi_nxv4i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i16 -1, i32 0 @@ -570,7 +570,7 @@ define @vxor_vi_nxv4i16_1( %va) { ; CHECK-LABEL: vxor_vi_nxv4i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i16 8, i32 0 @@ -583,7 +583,7 @@ ; CHECK-LABEL: vxor_vi_nxv4i16_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 16, i32 0 @@ -595,7 +595,7 @@ define @vxor_vv_nxv8i16( %va, %vb) { ; CHECK-LABEL: vxor_vv_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = xor %va, %vb @@ -605,7 +605,7 @@ define @vxor_vx_nxv8i16( %va, i16 signext %b) { ; CHECK-LABEL: vxor_vx_nxv8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -617,7 +617,7 @@ define @vxor_vi_nxv8i16_0( %va) { ; CHECK-LABEL: vxor_vi_nxv8i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i16 -1, i32 0 @@ -629,7 +629,7 @@ define @vxor_vi_nxv8i16_1( %va) { ; CHECK-LABEL: vxor_vi_nxv8i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i16 8, i32 0 @@ -642,7 +642,7 @@ ; CHECK-LABEL: vxor_vi_nxv8i16_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 16, i32 0 @@ -654,7 +654,7 @@ define @vxor_vv_nxv16i16( %va, %vb) { ; CHECK-LABEL: vxor_vv_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: 
vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = xor %va, %vb @@ -664,7 +664,7 @@ define @vxor_vx_nxv16i16( %va, i16 signext %b) { ; CHECK-LABEL: vxor_vx_nxv16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -676,7 +676,7 @@ define @vxor_vi_nxv16i16_0( %va) { ; CHECK-LABEL: vxor_vi_nxv16i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i16 -1, i32 0 @@ -688,7 +688,7 @@ define @vxor_vi_nxv16i16_1( %va) { ; CHECK-LABEL: vxor_vi_nxv16i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i16 8, i32 0 @@ -701,7 +701,7 @@ ; CHECK-LABEL: vxor_vi_nxv16i16_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 16, i32 0 @@ -713,7 +713,7 @@ define @vxor_vv_nxv32i16( %va, %vb) { ; CHECK-LABEL: vxor_vv_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = xor %va, %vb @@ -723,7 +723,7 @@ define @vxor_vx_nxv32i16( %va, i16 signext %b) { ; CHECK-LABEL: vxor_vx_nxv32i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 %b, i32 0 @@ -735,7 +735,7 @@ define @vxor_vi_nxv32i16_0( %va) { ; CHECK-LABEL: vxor_vi_nxv32i16_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i16 -1, i32 0 @@ -747,7 +747,7 @@ define @vxor_vi_nxv32i16_1( %va) { ; CHECK-LABEL: vxor_vi_nxv32i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i16 8, i32 0 @@ -760,7 +760,7 @@ ; CHECK-LABEL: vxor_vi_nxv32i16_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i16 16, i32 0 @@ -772,7 +772,7 @@ define @vxor_vv_nxv1i32( %va, %vb) { ; CHECK-LABEL: vxor_vv_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = xor %va, %vb @@ -782,7 +782,7 @@ define @vxor_vx_nxv1i32( %va, i32 signext %b) { ; CHECK-LABEL: vxor_vx_nxv1i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -794,7 +794,7 @@ define @vxor_vi_nxv1i32_0( %va) { ; CHECK-LABEL: vxor_vi_nxv1i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, 
zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i32 -1, i32 0 @@ -806,7 +806,7 @@ define @vxor_vi_nxv1i32_1( %va) { ; CHECK-LABEL: vxor_vi_nxv1i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i32 8, i32 0 @@ -819,7 +819,7 @@ ; CHECK-LABEL: vxor_vi_nxv1i32_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 16, i32 0 @@ -831,7 +831,7 @@ define @vxor_vv_nxv2i32( %va, %vb) { ; CHECK-LABEL: vxor_vv_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = xor %va, %vb @@ -841,7 +841,7 @@ define @vxor_vx_nxv2i32( %va, i32 signext %b) { ; CHECK-LABEL: vxor_vx_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -853,7 +853,7 @@ define @vxor_vi_nxv2i32_0( %va) { ; CHECK-LABEL: vxor_vi_nxv2i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i32 -1, i32 0 @@ -865,7 +865,7 @@ define @vxor_vi_nxv2i32_1( %va) { ; CHECK-LABEL: vxor_vi_nxv2i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i32 8, i32 0 @@ -878,7 +878,7 @@ ; CHECK-LABEL: vxor_vi_nxv2i32_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 16, i32 0 @@ -890,7 +890,7 @@ define @vxor_vv_nxv4i32( %va, %vb) { ; CHECK-LABEL: vxor_vv_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = xor %va, %vb @@ -900,7 +900,7 @@ define @vxor_vx_nxv4i32( %va, i32 signext %b) { ; CHECK-LABEL: vxor_vx_nxv4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -912,7 +912,7 @@ define @vxor_vi_nxv4i32_0( %va) { ; CHECK-LABEL: vxor_vi_nxv4i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i32 -1, i32 0 @@ -924,7 +924,7 @@ define @vxor_vi_nxv4i32_1( %va) { ; CHECK-LABEL: vxor_vi_nxv4i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i32 8, i32 0 @@ -937,7 +937,7 @@ ; CHECK-LABEL: vxor_vi_nxv4i32_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; 
CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 16, i32 0 @@ -949,7 +949,7 @@ define @vxor_vv_nxv8i32( %va, %vb) { ; CHECK-LABEL: vxor_vv_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = xor %va, %vb @@ -959,7 +959,7 @@ define @vxor_vx_nxv8i32( %va, i32 signext %b) { ; CHECK-LABEL: vxor_vx_nxv8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -971,7 +971,7 @@ define @vxor_vi_nxv8i32_0( %va) { ; CHECK-LABEL: vxor_vi_nxv8i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i32 -1, i32 0 @@ -983,7 +983,7 @@ define @vxor_vi_nxv8i32_1( %va) { ; CHECK-LABEL: vxor_vi_nxv8i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i32 8, i32 0 @@ -996,7 +996,7 @@ ; CHECK-LABEL: vxor_vi_nxv8i32_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 16, i32 0 @@ -1008,7 +1008,7 @@ define @vxor_vv_nxv16i32( %va, %vb) { ; CHECK-LABEL: vxor_vv_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = xor %va, %vb @@ -1018,7 +1018,7 @@ define @vxor_vx_nxv16i32( %va, i32 signext %b) { ; CHECK-LABEL: vxor_vx_nxv16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 %b, i32 0 @@ -1030,7 +1030,7 @@ define @vxor_vi_nxv16i32_0( %va) { ; CHECK-LABEL: vxor_vi_nxv16i32_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i32 -1, i32 0 @@ -1042,7 +1042,7 @@ define @vxor_vi_nxv16i32_1( %va) { ; CHECK-LABEL: vxor_vi_nxv16i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i32 8, i32 0 @@ -1055,7 +1055,7 @@ ; CHECK-LABEL: vxor_vi_nxv16i32_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i32 16, i32 0 @@ -1067,7 +1067,7 @@ define @vxor_vv_nxv1i64( %va, %vb) { ; CHECK-LABEL: vxor_vv_nxv1i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret %vc = xor %va, %vb @@ -1082,7 +1082,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, 
e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vxor.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 @@ -1090,7 +1090,7 @@ ; ; RV64-LABEL: vxor_vx_nxv1i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV64-NEXT: vxor.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -1102,7 +1102,7 @@ define @vxor_vi_nxv1i64_0( %va) { ; CHECK-LABEL: vxor_vi_nxv1i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i64 -1, i32 0 @@ -1114,7 +1114,7 @@ define @vxor_vi_nxv1i64_1( %va) { ; CHECK-LABEL: vxor_vi_nxv1i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i64 8, i32 0 @@ -1127,7 +1127,7 @@ ; CHECK-LABEL: vxor_vi_nxv1i64_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 16, i32 0 @@ -1139,7 +1139,7 @@ define @vxor_vv_nxv2i64( %va, %vb) { ; CHECK-LABEL: vxor_vv_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v10 ; CHECK-NEXT: ret %vc = xor %va, %vb @@ -1154,7 +1154,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vxor.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 @@ -1162,7 +1162,7 @@ ; ; RV64-LABEL: vxor_vx_nxv2i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV64-NEXT: vxor.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -1174,7 +1174,7 @@ define @vxor_vi_nxv2i64_0( %va) { ; CHECK-LABEL: vxor_vi_nxv2i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i64 -1, i32 0 @@ -1186,7 +1186,7 @@ define @vxor_vi_nxv2i64_1( %va) { ; CHECK-LABEL: vxor_vi_nxv2i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i64 8, i32 0 @@ -1199,7 +1199,7 @@ ; CHECK-LABEL: vxor_vi_nxv2i64_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 16, i32 0 @@ -1211,7 +1211,7 @@ define @vxor_vv_nxv4i64( %va, %vb) { ; CHECK-LABEL: vxor_vv_nxv4i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v12 ; CHECK-NEXT: ret %vc = xor %va, %vb @@ -1226,7 +1226,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vxor.vv v8, v8, v12 ; RV32-NEXT: addi sp, 
sp, 16 @@ -1234,7 +1234,7 @@ ; ; RV64-LABEL: vxor_vx_nxv4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV64-NEXT: vxor.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -1246,7 +1246,7 @@ define @vxor_vi_nxv4i64_0( %va) { ; CHECK-LABEL: vxor_vi_nxv4i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i64 -1, i32 0 @@ -1258,7 +1258,7 @@ define @vxor_vi_nxv4i64_1( %va) { ; CHECK-LABEL: vxor_vi_nxv4i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i64 8, i32 0 @@ -1271,7 +1271,7 @@ ; CHECK-LABEL: vxor_vi_nxv4i64_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 16, i32 0 @@ -1283,7 +1283,7 @@ define @vxor_vv_nxv8i64( %va, %vb) { ; CHECK-LABEL: vxor_vv_nxv8i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v16 ; CHECK-NEXT: ret %vc = xor %va, %vb @@ -1298,7 +1298,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vxor.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 @@ -1306,7 +1306,7 @@ ; ; RV64-LABEL: vxor_vx_nxv8i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vxor.vx v8, v8, a0 ; RV64-NEXT: ret %head = insertelement poison, i64 %b, i32 0 @@ -1318,7 +1318,7 @@ define @vxor_vi_nxv8i64_0( %va) { ; CHECK-LABEL: vxor_vi_nxv8i64_0: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %head = insertelement poison, i64 -1, i32 0 @@ -1330,7 +1330,7 @@ define @vxor_vi_nxv8i64_1( %va) { ; CHECK-LABEL: vxor_vi_nxv8i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 8 ; CHECK-NEXT: ret %head = insertelement poison, i64 8, i32 0 @@ -1343,7 +1343,7 @@ ; CHECK-LABEL: vxor_vi_nxv8i64_2: ; CHECK: # %bb.0: ; CHECK-NEXT: li a0, 16 -; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %head = insertelement poison, i64 16, i32 0 @@ -1359,7 +1359,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v8, (a0), zero ; RV32-NEXT: sw a3, 12(sp) ; RV32-NEXT: sw a2, 8(sp) @@ -1371,7 +1371,7 @@ ; RV64-LABEL: vxor_xx_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: xor a0, a0, a1 -; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV64-NEXT: vmv.v.x v8, a0 ; RV64-NEXT: ret %head1 = insertelement poison, i64 %a, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll @@ -33,7 +33,7 @@ define @vxor_vv_nxv1i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv1i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -57,7 +57,7 @@ define @vxor_vx_nxv1i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv1i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -83,7 +83,7 @@ define @vxor_vi_nxv1i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv1i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 7, i32 0 @@ -109,7 +109,7 @@ define @vxor_vi_nxv1i8_unmasked_1( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv1i8_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 -1, i32 0 @@ -135,7 +135,7 @@ define @vxor_vv_nxv2i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -159,7 +159,7 @@ define @vxor_vx_nxv2i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -185,7 +185,7 @@ define @vxor_vi_nxv2i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv2i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 7, i32 0 @@ -211,7 +211,7 @@ define @vxor_vi_nxv2i8_unmasked_1( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv2i8_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 -1, i32 0 @@ -237,7 +237,7 @@ define @vxor_vv_nxv4i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -261,7 +261,7 @@ define @vxor_vx_nxv4i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv4i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -287,7 +287,7 @@ define @vxor_vi_nxv4i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv4i8_unmasked: ; CHECK: # 
%bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 7, i32 0 @@ -313,7 +313,7 @@ define @vxor_vi_nxv4i8_unmasked_1( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv4i8_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 -1, i32 0 @@ -339,7 +339,7 @@ define @vxor_vv_nxv8i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -363,7 +363,7 @@ define @vxor_vx_nxv8i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -389,7 +389,7 @@ define @vxor_vi_nxv8i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv8i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 7, i32 0 @@ -415,7 +415,7 @@ define @vxor_vi_nxv8i8_unmasked_1( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv8i8_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 -1, i32 0 @@ -441,7 +441,7 @@ define @vxor_vv_nxv15i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv15i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -465,7 +465,7 @@ define @vxor_vx_nxv15i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv15i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -491,7 +491,7 @@ define @vxor_vi_nxv15i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv15i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 7, i32 0 @@ -517,7 +517,7 @@ define @vxor_vi_nxv15i8_unmasked_1( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv15i8_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 -1, i32 0 @@ -543,7 +543,7 @@ define @vxor_vv_nxv16i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -567,7 +567,7 @@ define 
@vxor_vx_nxv16i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -593,7 +593,7 @@ define @vxor_vi_nxv16i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv16i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 7, i32 0 @@ -619,7 +619,7 @@ define @vxor_vi_nxv16i8_unmasked_1( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv16i8_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 -1, i32 0 @@ -645,7 +645,7 @@ define @vxor_vv_nxv32i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv32i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -669,7 +669,7 @@ define @vxor_vx_nxv32i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv32i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -695,7 +695,7 @@ define @vxor_vi_nxv32i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv32i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 7, i32 0 @@ -721,7 +721,7 @@ define @vxor_vi_nxv32i8_unmasked_1( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv32i8_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 -1, i32 0 @@ -747,7 +747,7 @@ define @vxor_vv_nxv64i8_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv64i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -771,7 +771,7 @@ define @vxor_vx_nxv64i8_unmasked( %va, i8 %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv64i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 %b, i32 0 @@ -797,7 +797,7 @@ define @vxor_vi_nxv64i8_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv64i8_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 7, i32 0 @@ -823,7 +823,7 @@ define @vxor_vi_nxv64i8_unmasked_1( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv64i8_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma ; 
CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i8 -1, i32 0 @@ -849,7 +849,7 @@ define @vxor_vv_nxv1i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv1i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -885,7 +885,7 @@ define @vxor_vx_nxv1i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv1i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -911,7 +911,7 @@ define @vxor_vi_nxv1i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv1i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 7, i32 0 @@ -937,7 +937,7 @@ define @vxor_vi_nxv1i16_unmasked_1( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv1i16_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 -1, i32 0 @@ -963,7 +963,7 @@ define @vxor_vv_nxv2i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -987,7 +987,7 @@ define @vxor_vx_nxv2i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -1013,7 +1013,7 @@ define @vxor_vi_nxv2i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 7, i32 0 @@ -1039,7 +1039,7 @@ define @vxor_vi_nxv2i16_unmasked_1( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv2i16_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 -1, i32 0 @@ -1065,7 +1065,7 @@ define @vxor_vv_nxv4i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1089,7 +1089,7 @@ define @vxor_vx_nxv4i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -1115,7 +1115,7 @@ define @vxor_vi_nxv4i16_unmasked( %va, i32 
zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv4i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 7, i32 0 @@ -1141,7 +1141,7 @@ define @vxor_vi_nxv4i16_unmasked_1( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv4i16_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 -1, i32 0 @@ -1167,7 +1167,7 @@ define @vxor_vv_nxv8i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1191,7 +1191,7 @@ define @vxor_vx_nxv8i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -1217,7 +1217,7 @@ define @vxor_vi_nxv8i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv8i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 7, i32 0 @@ -1243,7 +1243,7 @@ define @vxor_vi_nxv8i16_unmasked_1( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv8i16_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 -1, i32 0 @@ -1269,7 +1269,7 @@ define @vxor_vv_nxv16i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1293,7 +1293,7 @@ define @vxor_vx_nxv16i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -1319,7 +1319,7 @@ define @vxor_vi_nxv16i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv16i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 7, i32 0 @@ -1345,7 +1345,7 @@ define @vxor_vi_nxv16i16_unmasked_1( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv16i16_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 -1, i32 0 @@ -1371,7 +1371,7 @@ define @vxor_vv_nxv32i16_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv32i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, 
e16, m8, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1395,7 +1395,7 @@ define @vxor_vx_nxv32i16_unmasked( %va, i16 %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv32i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 %b, i32 0 @@ -1421,7 +1421,7 @@ define @vxor_vi_nxv32i16_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv32i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 7, i32 0 @@ -1447,7 +1447,7 @@ define @vxor_vi_nxv32i16_unmasked_1( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv32i16_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i16 -1, i32 0 @@ -1473,7 +1473,7 @@ define @vxor_vv_nxv1i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv1i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1497,7 +1497,7 @@ define @vxor_vx_nxv1i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv1i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1523,7 +1523,7 @@ define @vxor_vi_nxv1i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv1i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 7, i32 0 @@ -1549,7 +1549,7 @@ define @vxor_vi_nxv1i32_unmasked_1( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv1i32_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 -1, i32 0 @@ -1575,7 +1575,7 @@ define @vxor_vv_nxv2i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1599,7 +1599,7 @@ define @vxor_vx_nxv2i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1625,7 +1625,7 @@ define @vxor_vi_nxv2i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 7, i32 0 @@ -1651,7 +1651,7 @@ define 
@vxor_vi_nxv2i32_unmasked_1( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv2i32_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 -1, i32 0 @@ -1677,7 +1677,7 @@ define @vxor_vv_nxv4i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1701,7 +1701,7 @@ define @vxor_vx_nxv4i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1727,7 +1727,7 @@ define @vxor_vi_nxv4i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv4i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 7, i32 0 @@ -1753,7 +1753,7 @@ define @vxor_vi_nxv4i32_unmasked_1( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv4i32_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 -1, i32 0 @@ -1779,7 +1779,7 @@ define @vxor_vv_nxv8i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1803,7 +1803,7 @@ define @vxor_vx_nxv8i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1829,7 +1829,7 @@ define @vxor_vi_nxv8i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv8i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 7, i32 0 @@ -1855,7 +1855,7 @@ define @vxor_vi_nxv8i32_unmasked_1( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv8i32_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 -1, i32 0 @@ -1881,7 +1881,7 @@ define @vxor_vv_nxv16i32_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v16 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -1905,7 +1905,7 @@ define @vxor_vx_nxv16i32_unmasked( %va, i32 %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vx_nxv16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, 
mu +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; CHECK-NEXT: vxor.vx v8, v8, a0 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 %b, i32 0 @@ -1931,7 +1931,7 @@ define @vxor_vi_nxv16i32_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv16i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 7, i32 0 @@ -1957,7 +1957,7 @@ define @vxor_vi_nxv16i32_unmasked_1( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv16i32_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i32 -1, i32 0 @@ -1983,7 +1983,7 @@ define @vxor_vv_nxv1i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv1i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v9 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -2000,7 +2000,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v9, v0.t @@ -2026,16 +2026,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma ; RV32-NEXT: vlse64.v v9, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma ; RV32-NEXT: vxor.vv v8, v8, v9 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_nxv1i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma ; RV64-NEXT: vxor.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -2061,7 +2061,7 @@ define @vxor_vi_nxv1i64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv1i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 7, i32 0 @@ -2087,7 +2087,7 @@ define @vxor_vi_nxv1i64_unmasked_1( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv1i64_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 -1, i32 0 @@ -2113,7 +2113,7 @@ define @vxor_vv_nxv2i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v10 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -2130,7 +2130,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v10, v0.t @@ -2156,16 +2156,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 
8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma ; RV32-NEXT: vlse64.v v10, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma ; RV32-NEXT: vxor.vv v8, v8, v10 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_nxv2i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma ; RV64-NEXT: vxor.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -2191,7 +2191,7 @@ define @vxor_vi_nxv2i64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 7, i32 0 @@ -2217,7 +2217,7 @@ define @vxor_vi_nxv2i64_unmasked_1( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv2i64_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 -1, i32 0 @@ -2243,7 +2243,7 @@ define @vxor_vv_nxv4i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v12 ; CHECK-NEXT: ret %head = insertelement poison, i1 true, i32 0 @@ -2260,7 +2260,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v12, v0.t @@ -2286,16 +2286,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma ; RV32-NEXT: vxor.vv v8, v8, v12 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_nxv4i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma ; RV64-NEXT: vxor.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -2321,7 +2321,7 @@ define @vxor_vi_nxv4i64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv4i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 7, i32 0 @@ -2347,7 +2347,7 @@ define @vxor_vi_nxv4i64_unmasked_1( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv4i64_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 -1, i32 0 @@ -2373,7 +2373,7 @@ define @vxor_vv_nxv8i64_unmasked( %va, %b, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vv_nxv8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vxor.vv v8, v8, v16 ; CHECK-NEXT: 
ret %head = insertelement poison, i1 true, i32 0 @@ -2390,7 +2390,7 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; RV32-NEXT: vxor.vv v8, v8, v16, v0.t @@ -2416,16 +2416,16 @@ ; RV32-NEXT: sw a1, 12(sp) ; RV32-NEXT: sw a0, 8(sp) ; RV32-NEXT: addi a0, sp, 8 -; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma ; RV32-NEXT: vlse64.v v16, (a0), zero -; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma ; RV32-NEXT: vxor.vv v8, v8, v16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: ret ; ; RV64-LABEL: vxor_vx_nxv8i64_unmasked: ; RV64: # %bb.0: -; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma ; RV64-NEXT: vxor.vx v8, v8, a0 ; RV64-NEXT: ret %elt.head = insertelement poison, i64 %b, i32 0 @@ -2451,7 +2451,7 @@ define @vxor_vi_nxv8i64_unmasked( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv8i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vxor.vi v8, v8, 7 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 7, i32 0 @@ -2477,7 +2477,7 @@ define @vxor_vi_nxv8i64_unmasked_1( %va, i32 zeroext %evl) { ; CHECK-LABEL: vxor_vi_nxv8i64_unmasked_1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vnot.v v8, v8 ; CHECK-NEXT: ret %elt.head = insertelement poison, i64 -1, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/vzext-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vzext-vp-mask.ll --- a/llvm/test/CodeGen/RISCV/rvv/vzext-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vzext-vp-mask.ll @@ -7,7 +7,7 @@ define @vzext_nxv2i1_nxv2i16( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vzext_nxv2i1_nxv2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -18,7 +18,7 @@ define @vzext_nxv2i1_nxv2i16_unmasked( %a, i32 zeroext %vl) { ; CHECK-LABEL: vzext_nxv2i1_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -31,7 +31,7 @@ define @vzext_nxv2i1_nxv2i32( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vzext_nxv2i1_nxv2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -42,7 +42,7 @@ define @vzext_nxv2i1_nxv2i32_unmasked( %a, i32 zeroext %vl) { ; CHECK-LABEL: vzext_nxv2i1_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -55,7 +55,7 @@ define @vzext_nxv2i1_nxv2i64( %a, %m, i32 zeroext %vl) { ; CHECK-LABEL: vzext_nxv2i1_nxv2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret @@ -66,7 +66,7 @@ define 
@vzext_nxv2i1_nxv2i64_unmasked( %a, i32 zeroext %vl) { ; CHECK-LABEL: vzext_nxv2i1_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vmv.v.i v8, 0 ; CHECK-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll @@ -18,7 +18,7 @@ define @vzext_nxv2i8_nxv2i16_unmasked( %a, i32 zeroext %vl) { ; CHECK-LABEL: vzext_nxv2i8_nxv2i16_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vzext.vf2 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -42,7 +42,7 @@ define @vzext_nxv2i8_nxv2i32_unmasked( %a, i32 zeroext %vl) { ; CHECK-LABEL: vzext_nxv2i8_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vzext.vf4 v9, v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -66,7 +66,7 @@ define @vzext_nxv2i8_nxv2i64_unmasked( %a, i32 zeroext %vl) { ; CHECK-LABEL: vzext_nxv2i8_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vzext.vf8 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -90,7 +90,7 @@ define @vzext_nxv2i16_nxv2i32_unmasked( %a, i32 zeroext %vl) { ; CHECK-LABEL: vzext_nxv2i16_nxv2i32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vzext.vf2 v9, v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -114,7 +114,7 @@ define @vzext_nxv2i16_nxv2i64_unmasked( %a, i32 zeroext %vl) { ; CHECK-LABEL: vzext_nxv2i16_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vzext.vf4 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -138,7 +138,7 @@ define @vzext_nxv2i32_nxv2i64_unmasked( %a, i32 zeroext %vl) { ; CHECK-LABEL: vzext_nxv2i32_nxv2i64_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vzext.vf2 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -155,7 +155,7 @@ ; CHECK-NEXT: li a2, 0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: srli a4, a1, 2 -; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, mu +; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma ; CHECK-NEXT: slli a1, a1, 1 ; CHECK-NEXT: sub a3, a0, a1 ; CHECK-NEXT: vslidedown.vx v0, v0, a4 @@ -189,14 +189,14 @@ ; CHECK-NEXT: mv a2, a1 ; CHECK-NEXT: .LBB13_2: ; CHECK-NEXT: li a3, 0 -; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma ; CHECK-NEXT: sub a1, a0, a1 ; CHECK-NEXT: vzext.vf4 v24, v8 ; CHECK-NEXT: bltu a0, a1, .LBB13_4 ; CHECK-NEXT: # %bb.3: ; CHECK-NEXT: mv a3, a1 ; CHECK-NEXT: .LBB13_4: -; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma ; CHECK-NEXT: vzext.vf4 v16, v10 ; CHECK-NEXT: vmv8r.v v8, v24 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/vzext.ll b/llvm/test/CodeGen/RISCV/rvv/vzext.ll --- a/llvm/test/CodeGen/RISCV/rvv/vzext.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vzext.ll @@ -11,7 +11,7 @@ define @intrinsic_vzext_vf8_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: 
intrinsic_vzext_vf8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vzext.vf8 v9, v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -55,7 +55,7 @@ define @intrinsic_vzext_vf8_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf8_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vzext.vf8 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -99,7 +99,7 @@ define @intrinsic_vzext_vf8_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf8_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vzext.vf8 v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -143,7 +143,7 @@ define @intrinsic_vzext_vf8_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf8_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vzext.vf8 v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -187,7 +187,7 @@ define @intrinsic_vzext_vf4_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf4_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vzext.vf4 v9, v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -231,7 +231,7 @@ define @intrinsic_vzext_vf4_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf4_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vzext.vf4 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -275,7 +275,7 @@ define @intrinsic_vzext_vf4_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf4_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vzext.vf4 v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -319,7 +319,7 @@ define @intrinsic_vzext_vf4_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf4_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vzext.vf4 v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -363,7 +363,7 @@ define @intrinsic_vzext_vf4_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf4_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vzext.vf4 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -407,7 +407,7 @@ define @intrinsic_vzext_vf4_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf4_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vzext.vf4 v9, v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -451,7 +451,7 @@ define @intrinsic_vzext_vf4_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf4_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vzext.vf4 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -495,7 +495,7 @@ 
define @intrinsic_vzext_vf4_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf4_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vzext.vf4 v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -539,7 +539,7 @@ define @intrinsic_vzext_vf4_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf4_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: vzext.vf4 v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -583,7 +583,7 @@ define @intrinsic_vzext_vf2_nxv1i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vzext.vf2 v9, v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -627,7 +627,7 @@ define @intrinsic_vzext_vf2_nxv2i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv2i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-NEXT: vzext.vf2 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -671,7 +671,7 @@ define @intrinsic_vzext_vf2_nxv4i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv4i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-NEXT: vzext.vf2 v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -715,7 +715,7 @@ define @intrinsic_vzext_vf2_nxv8i64( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: vzext.vf2 v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -759,7 +759,7 @@ define @intrinsic_vzext_vf2_nxv1i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv1i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vzext.vf2 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -803,7 +803,7 @@ define @intrinsic_vzext_vf2_nxv2i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv2i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vzext.vf2 v9, v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -847,7 +847,7 @@ define @intrinsic_vzext_vf2_nxv4i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv4i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma ; CHECK-NEXT: vzext.vf2 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -891,7 +891,7 @@ define @intrinsic_vzext_vf2_nxv8i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vzext.vf2 v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -935,7 +935,7 @@ define @intrinsic_vzext_vf2_nxv16i32( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma ; CHECK-NEXT: 
vzext.vf2 v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret @@ -979,7 +979,7 @@ define @intrinsic_vzext_vf2_nxv1i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv1i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vzext.vf2 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1023,7 +1023,7 @@ define @intrinsic_vzext_vf2_nxv2i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv2i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma ; CHECK-NEXT: vzext.vf2 v9, v8 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -1067,7 +1067,7 @@ define @intrinsic_vzext_vf2_nxv4i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv4i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma ; CHECK-NEXT: vzext.vf2 v9, v8 ; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret @@ -1111,7 +1111,7 @@ define @intrinsic_vzext_vf2_nxv8i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma ; CHECK-NEXT: vzext.vf2 v10, v8 ; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret @@ -1155,7 +1155,7 @@ define @intrinsic_vzext_vf2_nxv16i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma ; CHECK-NEXT: vzext.vf2 v12, v8 ; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret @@ -1199,7 +1199,7 @@ define @intrinsic_vzext_vf2_nxv32i16( %0, iXLen %1) nounwind { ; CHECK-LABEL: intrinsic_vzext_vf2_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vzext.vf2 v16, v8 ; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/wrong-chain-fixed-load.ll b/llvm/test/CodeGen/RISCV/rvv/wrong-chain-fixed-load.ll --- a/llvm/test/CodeGen/RISCV/rvv/wrong-chain-fixed-load.ll +++ b/llvm/test/CodeGen/RISCV/rvv/wrong-chain-fixed-load.ll @@ -9,7 +9,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: lui a0, %hi(c) ; CHECK-NEXT: addi a0, a0, %lo(c) -; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: addi a1, a0, 16 ; CHECK-NEXT: vle64.v v9, (a1) diff --git a/llvm/test/CodeGen/RISCV/rvv/zve32-types.ll b/llvm/test/CodeGen/RISCV/rvv/zve32-types.ll --- a/llvm/test/CodeGen/RISCV/rvv/zve32-types.ll +++ b/llvm/test/CodeGen/RISCV/rvv/zve32-types.ll @@ -15,12 +15,12 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: srli a2, a2, 3 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vle8.v v8, (a0) ; CHECK-NEXT: vle8.v v9, (a1) -; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 -; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret %va = load , * %pa @@ -35,12 +35,12 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: srli a2, a2, 3 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; 
CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v9, (a1) -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 -; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma ; CHECK-NEXT: vse16.v v8, (a0) ; CHECK-NEXT: ret %va = load , * %pa @@ -55,12 +55,12 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: srli a2, a2, 3 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vadd.vv v8, v8, v9 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret %va = load , * %pa @@ -75,12 +75,12 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a2, vlenb ; CHECK-NEXT: srli a2, a2, 3 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v9, (a1) -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma ; CHECK-NEXT: vfadd.vv v8, v8, v9 -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret %va = load , * %pa diff --git a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir --- a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir +++ b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir @@ -26,7 +26,7 @@ ; CHECK-NEXT: $x12 = frame-setup PseudoReadVLENB ; CHECK-NEXT: $x12 = frame-setup SLLI killed $x12, 3 ; CHECK-NEXT: $x2 = frame-setup SUB $x2, killed $x12 - ; CHECK-NEXT: dead $x0 = PseudoVSETVLI killed renamable $x11, 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: dead $x0 = PseudoVSETVLI killed renamable $x11, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype ; CHECK-NEXT: $v0_v1_v2_v3_v4_v5_v6 = PseudoVLSEG7E64_V_M1 renamable $x10, $noreg, 6 /* e64 */, implicit $vl, implicit $vtype ; CHECK-NEXT: $x11 = ADDI $x2, 16 ; CHECK-NEXT: $x12 = PseudoReadVLENB diff --git a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll --- a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll +++ b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll @@ -90,7 +90,7 @@ define @test_vlseg2ff_nxv16i16(i16* %base, i64* %outvl) { ; CHECK-LABEL: test_vlseg2ff_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, ma ; CHECK-NEXT: vlseg2e16ff.v v4, (a0) ; CHECK-NEXT: csrr a0, vl ; CHECK-NEXT: sd a0, 0(a1) @@ -130,7 +130,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0) ; CHECK-NEXT: ret entry: @@ -143,7 +143,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, ma ; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t ; CHECK-NEXT: ret entry: @@ -159,7 +159,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def 
$v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1 ; CHECK-NEXT: ret entry: @@ -172,7 +172,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, ma ; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t ; CHECK-NEXT: ret entry: @@ -189,7 +189,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -203,7 +203,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, ma ; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: @@ -220,7 +220,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16 ; CHECK-NEXT: ret entry: @@ -234,7 +234,7 @@ ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4 ; CHECK-NEXT: vmv4r.v v16, v12 ; CHECK-NEXT: vmv4r.v v12, v8 -; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, ma ; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/shuffle-reverse.ll --- a/llvm/test/CodeGen/RISCV/shuffle-reverse.ll +++ b/llvm/test/CodeGen/RISCV/shuffle-reverse.ll @@ -5,9 +5,9 @@ define <2 x i8> @v2i8(<2 x i8> %a) { ; CHECK-LABEL: v2i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vslidedown.vi v9, v8, 1 -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 1 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -18,15 +18,15 @@ define <4 x i8> @v2i8_2(<2 x i8> %a, <2 x i8> %b) { ; CHECK-LABEL: v2i8_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vslidedown.vi v10, v8, 1 -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, tu, ma ; CHECK-NEXT: vslideup.vi v10, v8, 1 -; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v9, 1 -; CHECK-NEXT: vsetivli zero, 2, e8, mf8, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 1 -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 2 ; CHECK-NEXT: ret %v4i8 = shufflevector <2 x i8> %a, <2 x i8> %b, <4 x i32> @@ -36,7 +36,7 @@ define <4 x i8> @v4i8(<4 x i8> %a) { ; CHECK-LABEL: v4i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: vrsub.vi v10, v9, 3 
; CHECK-NEXT: vrgather.vv v9, v8, v10 @@ -66,7 +66,7 @@ define <8 x i8> @v8i8(<8 x i8> %a) { ; CHECK-LABEL: v8i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: vrsub.vi v10, v9, 7 ; CHECK-NEXT: vrgather.vv v9, v8, v10 @@ -79,12 +79,12 @@ define <16 x i8> @v8i8_2(<8 x i8> %a, <8 x i8> %b) { ; CHECK-LABEL: v8i8_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vid.v v11 ; CHECK-NEXT: vrsub.vi v12, v11, 15 ; CHECK-NEXT: vrgather.vv v10, v8, v12 ; CHECK-NEXT: li a0, 255 -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vmv.s.x v0, a0 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; CHECK-NEXT: vrsub.vi v8, v11, 7 @@ -98,7 +98,7 @@ define <16 x i8> @v16i8(<16 x i8> %a) { ; CHECK-LABEL: v16i8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: vrsub.vi v10, v9, 15 ; CHECK-NEXT: vrgather.vv v9, v8, v10 @@ -114,7 +114,7 @@ ; RV32-NEXT: lui a0, %hi(.LCPI7_0) ; RV32-NEXT: addi a0, a0, %lo(.LCPI7_0) ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; RV32-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; RV32-NEXT: vle8.v v12, (a0) ; RV32-NEXT: vmv1r.v v14, v9 ; RV32-NEXT: # kill: def $v8 killed $v8 def $v8m2 @@ -123,7 +123,7 @@ ; RV32-NEXT: vrsub.vi v8, v8, 15 ; RV32-NEXT: lui a0, 16 ; RV32-NEXT: addi a0, a0, -1 -; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV32-NEXT: vmv.s.x v0, a0 ; RV32-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; RV32-NEXT: vrgather.vv v10, v14, v8, v0.t @@ -135,7 +135,7 @@ ; RV64-NEXT: lui a0, %hi(.LCPI7_0) ; RV64-NEXT: addi a0, a0, %lo(.LCPI7_0) ; RV64-NEXT: li a1, 32 -; RV64-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; RV64-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; RV64-NEXT: vle8.v v12, (a0) ; RV64-NEXT: vmv1r.v v14, v9 ; RV64-NEXT: # kill: def $v8 killed $v8 def $v8m2 @@ -144,7 +144,7 @@ ; RV64-NEXT: vrsub.vi v8, v8, 15 ; RV64-NEXT: lui a0, 16 ; RV64-NEXT: addiw a0, a0, -1 -; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV64-NEXT: vmv.s.x v0, a0 ; RV64-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; RV64-NEXT: vrgather.vv v10, v14, v8, v0.t @@ -157,9 +157,9 @@ define <2 x i16> @v2i16(<2 x i16> %a) { ; CHECK-LABEL: v2i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v9, v8, 1 -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 1 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -170,15 +170,15 @@ define <4 x i16> @v2i16_2(<2 x i16> %a, <2 x i16> %b) { ; CHECK-LABEL: v2i16_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v10, v8, 1 -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, tu, ma ; CHECK-NEXT: vslideup.vi v10, v8, 1 -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v9, 1 -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 1 -; 
CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 2 ; CHECK-NEXT: ret %v4i16 = shufflevector <2 x i16> %a, <2 x i16> %b, <4 x i32> @@ -188,7 +188,7 @@ define <4 x i16> @v4i16(<4 x i16> %a) { ; CHECK-LABEL: v4i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: vrsub.vi v10, v9, 3 ; CHECK-NEXT: vrgather.vv v9, v8, v10 @@ -218,7 +218,7 @@ define <8 x i16> @v8i16(<8 x i16> %a) { ; CHECK-LABEL: v8i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: vrsub.vi v10, v9, 7 ; CHECK-NEXT: vrgather.vv v9, v8, v10 @@ -250,7 +250,7 @@ define <16 x i16> @v16i16(<16 x i16> %a) { ; CHECK-LABEL: v16i16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vid.v v10 ; CHECK-NEXT: vrsub.vi v12, v10, 15 ; CHECK-NEXT: vrgather.vv v10, v8, v12 @@ -266,7 +266,7 @@ ; RV32-NEXT: lui a0, %hi(.LCPI15_0) ; RV32-NEXT: addi a0, a0, %lo(.LCPI15_0) ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; RV32-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; RV32-NEXT: vle16.v v16, (a0) ; RV32-NEXT: vmv2r.v v20, v10 ; RV32-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m4 @@ -275,7 +275,7 @@ ; RV32-NEXT: vrsub.vi v8, v8, 15 ; RV32-NEXT: lui a0, 16 ; RV32-NEXT: addi a0, a0, -1 -; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV32-NEXT: vmv.s.x v0, a0 ; RV32-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; RV32-NEXT: vrgather.vv v12, v20, v8, v0.t @@ -287,7 +287,7 @@ ; RV64-NEXT: lui a0, %hi(.LCPI15_0) ; RV64-NEXT: addi a0, a0, %lo(.LCPI15_0) ; RV64-NEXT: li a1, 32 -; RV64-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; RV64-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; RV64-NEXT: vle16.v v16, (a0) ; RV64-NEXT: vmv2r.v v20, v10 ; RV64-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m4 @@ -296,7 +296,7 @@ ; RV64-NEXT: vrsub.vi v8, v8, 15 ; RV64-NEXT: lui a0, 16 ; RV64-NEXT: addiw a0, a0, -1 -; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV64-NEXT: vmv.s.x v0, a0 ; RV64-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; RV64-NEXT: vrgather.vv v12, v20, v8, v0.t @@ -309,9 +309,9 @@ define <2 x i32> @v2i32(<2 x i32> %a) { ; CHECK-LABEL: v2i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v9, v8, 1 -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 1 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -322,15 +322,15 @@ define <4 x i32> @v2i32_2(<2 x i32> %a, < 2 x i32> %b) { ; CHECK-LABEL: v2i32_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v10, v8, 1 -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v10, v8, 1 -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v9, 1 -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 1 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, tu, mu 
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 2 ; CHECK-NEXT: ret %v4i32 = shufflevector <2 x i32> %a, <2 x i32> %b, <4 x i32> @@ -340,7 +340,7 @@ define <4 x i32> @v4i32(<4 x i32> %a) { ; CHECK-LABEL: v4i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: vrsub.vi v10, v9, 3 ; CHECK-NEXT: vrgather.vv v9, v8, v10 @@ -372,7 +372,7 @@ define <8 x i32> @v8i32(<8 x i32> %a) { ; CHECK-LABEL: v8i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vid.v v10 ; CHECK-NEXT: vrsub.vi v12, v10, 7 ; CHECK-NEXT: vrgather.vv v10, v8, v12 @@ -404,7 +404,7 @@ define <16 x i32> @v16i32(<16 x i32> %a) { ; CHECK-LABEL: v16i32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma ; CHECK-NEXT: vid.v v12 ; CHECK-NEXT: vrsub.vi v16, v12, 15 ; CHECK-NEXT: vrgather.vv v12, v8, v16 @@ -420,7 +420,7 @@ ; RV32-NEXT: lui a0, %hi(.LCPI23_0) ; RV32-NEXT: addi a0, a0, %lo(.LCPI23_0) ; RV32-NEXT: li a1, 32 -; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; RV32-NEXT: vle32.v v0, (a0) ; RV32-NEXT: vmv4r.v v24, v12 ; RV32-NEXT: vmv4r.v v16, v8 @@ -429,7 +429,7 @@ ; RV32-NEXT: vrsub.vi v16, v16, 15 ; RV32-NEXT: lui a0, 16 ; RV32-NEXT: addi a0, a0, -1 -; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV32-NEXT: vmv.s.x v0, a0 ; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; RV32-NEXT: vrgather.vv v8, v24, v16, v0.t @@ -440,7 +440,7 @@ ; RV64-NEXT: lui a0, %hi(.LCPI23_0) ; RV64-NEXT: addi a0, a0, %lo(.LCPI23_0) ; RV64-NEXT: li a1, 32 -; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma ; RV64-NEXT: vle32.v v0, (a0) ; RV64-NEXT: vmv4r.v v24, v12 ; RV64-NEXT: vmv4r.v v16, v8 @@ -449,7 +449,7 @@ ; RV64-NEXT: vrsub.vi v16, v16, 15 ; RV64-NEXT: lui a0, 16 ; RV64-NEXT: addiw a0, a0, -1 -; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; RV64-NEXT: vmv.s.x v0, a0 ; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; RV64-NEXT: vrgather.vv v8, v24, v16, v0.t @@ -461,9 +461,9 @@ define <2 x i64> @v2i64(<2 x i64> %a) { ; CHECK-LABEL: v2i64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v9, v8, 1 -; CHECK-NEXT: vsetivli zero, 2, e64, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 1 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -474,15 +474,15 @@ define <4 x i64> @v2i64_2(<2 x i64> %a, < 2 x i64> %b) { ; CHECK-LABEL: v2i64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v12, v8, 1 -; CHECK-NEXT: vsetivli zero, 2, e64, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, tu, ma ; CHECK-NEXT: vslideup.vi v12, v8, 1 -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v10, v9, 1 -; CHECK-NEXT: vsetivli zero, 2, e64, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, tu, ma ; CHECK-NEXT: vslideup.vi v10, v9, 1 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, ma ; CHECK-NEXT: vslideup.vi v10, v12, 2 ; CHECK-NEXT: 
vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -493,17 +493,17 @@ define <4 x i64> @v4i64(<4 x i64> %a) { ; RV32-LABEL: v4i64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV32-NEXT: vid.v v10 ; RV32-NEXT: vrsub.vi v12, v10, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; RV32-NEXT: vrgatherei16.vv v10, v8, v12 ; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: v4i64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-NEXT: vid.v v10 ; RV64-NEXT: vrsub.vi v12, v10, 3 ; RV64-NEXT: vrgather.vv v10, v8, v12 @@ -518,14 +518,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vmv2r.v v16, v10 ; RV32-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m4 -; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV32-NEXT: vid.v v20 ; RV32-NEXT: vrsub.vi v21, v20, 7 -; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; RV32-NEXT: vrgatherei16.vv v12, v8, v21 ; RV32-NEXT: li a0, 15 ; RV32-NEXT: vmv.s.x v0, a0 -; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; RV32-NEXT: vrsub.vi v8, v20, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vrgatherei16.vv v12, v16, v8, v0.t @@ -553,9 +553,9 @@ define <2 x half> @v2f16(<2 x half> %a) { ; CHECK-LABEL: v2f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v9, v8, 1 -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 1 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -566,15 +566,15 @@ define <4 x half> @v2f16_2(<2 x half> %a, <2 x half> %b) { ; CHECK-LABEL: v2f16_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v10, v8, 1 -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, tu, ma ; CHECK-NEXT: vslideup.vi v10, v8, 1 -; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v9, 1 -; CHECK-NEXT: vsetivli zero, 2, e16, mf4, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 1 -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 2 ; CHECK-NEXT: ret %v4f16 = shufflevector <2 x half> %a, <2 x half> %b, <4 x i32> @@ -584,7 +584,7 @@ define <4 x half> @v4f16(<4 x half> %a) { ; CHECK-LABEL: v4f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: vrsub.vi v10, v9, 3 ; CHECK-NEXT: vrgather.vv v9, v8, v10 @@ -614,7 +614,7 @@ define <8 x half> @v8f16(<8 x half> %a) { ; CHECK-LABEL: v8f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: vrsub.vi v10, v9, 7 ; CHECK-NEXT: vrgather.vv v9, v8, v10 @@ -646,7 +646,7 @@ define <16 x half> @v16f16(<16 x half> %a) { ; CHECK-LABEL: v16f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma ; CHECK-NEXT: vid.v v10 ; 
CHECK-NEXT: vrsub.vi v12, v10, 15 ; CHECK-NEXT: vrgather.vv v10, v8, v12 @@ -663,12 +663,12 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI35_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI35_0) ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vle16.v v12, (a0) ; CHECK-NEXT: vmv.v.i v16, 0 -; CHECK-NEXT: vsetivli zero, 16, e16, m4, tu, mu +; CHECK-NEXT: vsetivli zero, 16, e16, m4, tu, ma ; CHECK-NEXT: vslideup.vi v16, v8, 0 -; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma ; CHECK-NEXT: vrgather.vv v8, v16, v12 ; CHECK-NEXT: ret %v32f16 = shufflevector <16 x half> %a, <16 x half> undef, <32 x i32> @@ -678,9 +678,9 @@ define <2 x float> @v2f32(<2 x float> %a) { ; CHECK-LABEL: v2f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v9, v8, 1 -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 1 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -691,15 +691,15 @@ define <4 x float> @v2f32_2(<2 x float> %a, <2 x float> %b) { ; CHECK-LABEL: v2f32_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v10, v8, 1 -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v10, v8, 1 -; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma ; CHECK-NEXT: vslidedown.vi v8, v9, 1 -; CHECK-NEXT: vsetivli zero, 2, e32, mf2, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, tu, ma ; CHECK-NEXT: vslideup.vi v8, v9, 1 -; CHECK-NEXT: vsetivli zero, 4, e32, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, tu, ma ; CHECK-NEXT: vslideup.vi v8, v10, 2 ; CHECK-NEXT: ret %v4f32 = shufflevector <2 x float> %a, <2 x float> %b, <4 x i32> @@ -709,7 +709,7 @@ define <4 x float> @v4f32(<4 x float> %a) { ; CHECK-LABEL: v4f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: vrsub.vi v10, v9, 3 ; CHECK-NEXT: vrgather.vv v9, v8, v10 @@ -741,7 +741,7 @@ define <8 x float> @v8f32(<8 x float> %a) { ; CHECK-LABEL: v8f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; CHECK-NEXT: vid.v v10 ; CHECK-NEXT: vrsub.vi v12, v10, 7 ; CHECK-NEXT: vrgather.vv v10, v8, v12 @@ -773,9 +773,9 @@ define <2 x double> @v2f64(<2 x double> %a) { ; CHECK-LABEL: v2f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v9, v8, 1 -; CHECK-NEXT: vsetivli zero, 2, e64, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, tu, ma ; CHECK-NEXT: vslideup.vi v9, v8, 1 ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret @@ -786,15 +786,15 @@ define <4 x double> @v2f64_2(<2 x double> %a, < 2 x double> %b) { ; CHECK-LABEL: v2f64_2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma ; CHECK-NEXT: vslidedown.vi v12, v8, 1 -; CHECK-NEXT: vsetivli zero, 2, e64, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, tu, ma ; CHECK-NEXT: vslideup.vi v12, v8, 1 -; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, 
ma ; CHECK-NEXT: vslidedown.vi v10, v9, 1 -; CHECK-NEXT: vsetivli zero, 2, e64, m1, tu, mu +; CHECK-NEXT: vsetivli zero, 2, e64, m1, tu, ma ; CHECK-NEXT: vslideup.vi v10, v9, 1 -; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, mu +; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, ma ; CHECK-NEXT: vslideup.vi v10, v12, 2 ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret @@ -805,17 +805,17 @@ define <4 x double> @v4f64(<4 x double> %a) { ; RV32-LABEL: v4f64: ; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV32-NEXT: vid.v v10 ; RV32-NEXT: vrsub.vi v12, v10, 3 -; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma ; RV32-NEXT: vrgatherei16.vv v10, v8, v12 ; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: v4f64: ; RV64: # %bb.0: -; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64-NEXT: vid.v v10 ; RV64-NEXT: vrsub.vi v12, v10, 3 ; RV64-NEXT: vrgather.vv v10, v8, v12 @@ -830,14 +830,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vmv2r.v v16, v10 ; RV32-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m4 -; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma ; RV32-NEXT: vid.v v20 ; RV32-NEXT: vrsub.vi v21, v20, 7 -; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu +; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma ; RV32-NEXT: vrgatherei16.vv v12, v8, v21 ; RV32-NEXT: li a0, 15 ; RV32-NEXT: vmv.s.x v0, a0 -; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu +; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma ; RV32-NEXT: vrsub.vi v8, v20, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vrgatherei16.vv v12, v16, v8, v0.t @@ -868,7 +868,7 @@ ; CHECK-NEXT: lui a0, %hi(.LCPI46_0) ; CHECK-NEXT: addi a0, a0, %lo(.LCPI46_0) ; CHECK-NEXT: li a1, 32 -; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma ; CHECK-NEXT: vle8.v v12, (a0) ; CHECK-NEXT: vrgather.vv v10, v8, v12 ; CHECK-NEXT: vmv.v.v v8, v10 diff --git a/llvm/test/CodeGen/RISCV/spill-fpr-scalar.ll b/llvm/test/CodeGen/RISCV/spill-fpr-scalar.ll --- a/llvm/test/CodeGen/RISCV/spill-fpr-scalar.ll +++ b/llvm/test/CodeGen/RISCV/spill-fpr-scalar.ll @@ -15,12 +15,12 @@ ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f16: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, ma ; CHECK-NEXT: vfmv.f.s ft0, v8 ; CHECK-NEXT: fsh ft0, 14(sp) # 2-byte Folded Spill ; CHECK-NEXT: #APP ; CHECK-NEXT: #NO_APP -; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: flh ft0, 14(sp) # 2-byte Folded Reload ; CHECK-NEXT: vfmv.v.f v8, ft0 ; CHECK-NEXT: addi sp, sp, 16 @@ -36,12 +36,12 @@ ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f32: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, mu +; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, ma ; CHECK-NEXT: vfmv.f.s ft0, v8 ; CHECK-NEXT: fsw ft0, 12(sp) # 4-byte Folded Spill ; CHECK-NEXT: #APP ; CHECK-NEXT: #NO_APP -; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: flw ft0, 12(sp) # 4-byte Folded Reload ; CHECK-NEXT: vfmv.v.f v8, ft0 ; CHECK-NEXT: addi sp, sp, 16 @@ -57,12 +57,12 @@ ; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f64: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, 
mu +; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, ma ; CHECK-NEXT: vfmv.f.s ft0, v8 ; CHECK-NEXT: fsd ft0, 8(sp) # 8-byte Folded Spill ; CHECK-NEXT: #APP ; CHECK-NEXT: #NO_APP -; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: fld ft0, 8(sp) # 8-byte Folded Reload ; CHECK-NEXT: vfmv.v.f v8, ft0 ; CHECK-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll --- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll +++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll @@ -648,9 +648,9 @@ ; RV32MV-NEXT: sw a1, 12(sp) ; RV32MV-NEXT: sw a0, 8(sp) ; RV32MV-NEXT: li a0, 85 -; RV32MV-NEXT: vsetivli zero, 1, e8, mf8, ta, mu +; RV32MV-NEXT: vsetivli zero, 1, e8, mf8, ta, ma ; RV32MV-NEXT: vmv.s.x v0, a0 -; RV32MV-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; RV32MV-NEXT: vsetivli zero, 8, e32, m2, ta, ma ; RV32MV-NEXT: mv a0, sp ; RV32MV-NEXT: vle32.v v8, (a0) ; RV32MV-NEXT: vmv.v.i v10, 1 @@ -661,15 +661,15 @@ ; RV32MV-NEXT: li a0, 1 ; RV32MV-NEXT: vmv.s.x v12, a0 ; RV32MV-NEXT: vmv.v.i v14, 0 -; RV32MV-NEXT: vsetivli zero, 3, e32, m2, tu, mu +; RV32MV-NEXT: vsetivli zero, 3, e32, m2, tu, ma ; RV32MV-NEXT: vslideup.vi v14, v12, 2 -; RV32MV-NEXT: vsetivli zero, 5, e32, m2, tu, mu +; RV32MV-NEXT: vsetivli zero, 5, e32, m2, tu, ma ; RV32MV-NEXT: vslideup.vi v14, v10, 4 -; RV32MV-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV32MV-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV32MV-NEXT: vmsne.vv v0, v8, v14 ; RV32MV-NEXT: vmv.v.i v8, 0 ; RV32MV-NEXT: vmerge.vim v8, v8, -1, v0 -; RV32MV-NEXT: vsetivli zero, 1, e32, m2, ta, mu +; RV32MV-NEXT: vsetivli zero, 1, e32, m2, ta, ma ; RV32MV-NEXT: vse32.v v8, (s2) ; RV32MV-NEXT: vslidedown.vi v10, v8, 1 ; RV32MV-NEXT: vmv.x.s a0, v10 @@ -758,7 +758,7 @@ ; RV64MV-NEXT: add a1, a3, a1 ; RV64MV-NEXT: sd a1, 8(sp) ; RV64MV-NEXT: mv a1, sp -; RV64MV-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; RV64MV-NEXT: vsetivli zero, 4, e64, m2, ta, ma ; RV64MV-NEXT: vle64.v v8, (a1) ; RV64MV-NEXT: lui a1, %hi(.LCPI3_3) ; RV64MV-NEXT: addi a1, a1, %lo(.LCPI3_3) @@ -769,7 +769,7 @@ ; RV64MV-NEXT: vmsne.vv v0, v8, v10 ; RV64MV-NEXT: vmv.v.i v8, 0 ; RV64MV-NEXT: vmerge.vim v8, v8, -1, v0 -; RV64MV-NEXT: vsetivli zero, 1, e64, m2, ta, mu +; RV64MV-NEXT: vsetivli zero, 1, e64, m2, ta, ma ; RV64MV-NEXT: vslidedown.vi v10, v8, 2 ; RV64MV-NEXT: vmv.x.s a2, v10 ; RV64MV-NEXT: slli a3, a2, 31 diff --git a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll --- a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll +++ b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll @@ -535,13 +535,13 @@ ; RV32MV-NEXT: andi a1, a1, 2047 ; RV32MV-NEXT: sh a1, 12(sp) ; RV32MV-NEXT: addi a1, sp, 8 -; RV32MV-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; RV32MV-NEXT: vsetivli zero, 4, e16, mf2, ta, ma ; RV32MV-NEXT: vle16.v v8, (a1) ; RV32MV-NEXT: vmv.v.i v9, 10 ; RV32MV-NEXT: li a1, 9 -; RV32MV-NEXT: vsetvli zero, zero, e16, mf2, tu, mu +; RV32MV-NEXT: vsetvli zero, zero, e16, mf2, tu, ma ; RV32MV-NEXT: vmv.s.x v9, a1 -; RV32MV-NEXT: vsetvli zero, zero, e16, mf2, ta, mu +; RV32MV-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV32MV-NEXT: lui a1, %hi(.LCPI4_0) ; RV32MV-NEXT: addi a1, a1, %lo(.LCPI4_0) ; RV32MV-NEXT: vle16.v v10, (a1) @@ -552,11 +552,11 @@ ; RV32MV-NEXT: vsll.vv v9, v10, v9 ; RV32MV-NEXT: vmv.v.i v10, 0 ; RV32MV-NEXT: li a1, 1 -; RV32MV-NEXT: vsetvli zero, zero, e16, mf2, tu, mu +; RV32MV-NEXT: vsetvli 
zero, zero, e16, mf2, tu, mu
+; RV32MV-NEXT: vsetvli zero, zero, e16, mf2, tu, ma
; RV32MV-NEXT: vmv1r.v v11, v10
; RV32MV-NEXT: vmv.s.x v11, a1
; RV32MV-NEXT: li a1, 2047
-; RV32MV-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
+; RV32MV-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; RV32MV-NEXT: vand.vx v8, v8, a1
; RV32MV-NEXT: lui a2, %hi(.LCPI4_1)
; RV32MV-NEXT: addi a2, a2, %lo(.LCPI4_1)
@@ -566,7 +566,7 @@
; RV32MV-NEXT: vand.vx v8, v8, a1
; RV32MV-NEXT: vmsltu.vv v0, v12, v8
; RV32MV-NEXT: vmerge.vim v8, v10, -1, v0
-; RV32MV-NEXT: vsetivli zero, 1, e16, mf2, ta, mu
+; RV32MV-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
; RV32MV-NEXT: vslidedown.vi v9, v8, 2
; RV32MV-NEXT: vmv.x.s a1, v9
; RV32MV-NEXT: slli a2, a1, 21
@@ -600,13 +600,13 @@
; RV64MV-NEXT: srli a1, a1, 53
; RV64MV-NEXT: sh a1, 10(sp)
; RV64MV-NEXT: addi a1, sp, 8
-; RV64MV-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; RV64MV-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; RV64MV-NEXT: vle16.v v8, (a1)
; RV64MV-NEXT: vmv.v.i v9, 10
; RV64MV-NEXT: li a1, 9
-; RV64MV-NEXT: vsetvli zero, zero, e16, mf2, tu, mu
+; RV64MV-NEXT: vsetvli zero, zero, e16, mf2, tu, ma
; RV64MV-NEXT: vmv.s.x v9, a1
-; RV64MV-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
+; RV64MV-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; RV64MV-NEXT: lui a1, %hi(.LCPI4_0)
; RV64MV-NEXT: addi a1, a1, %lo(.LCPI4_0)
; RV64MV-NEXT: vle16.v v10, (a1)
@@ -617,11 +617,11 @@
; RV64MV-NEXT: vsll.vv v9, v10, v9
; RV64MV-NEXT: vmv.v.i v10, 0
; RV64MV-NEXT: li a1, 1
-; RV64MV-NEXT: vsetvli zero, zero, e16, mf2, tu, mu
+; RV64MV-NEXT: vsetvli zero, zero, e16, mf2, tu, ma
; RV64MV-NEXT: vmv1r.v v11, v10
; RV64MV-NEXT: vmv.s.x v11, a1
; RV64MV-NEXT: li a1, 2047
-; RV64MV-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
+; RV64MV-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; RV64MV-NEXT: vand.vx v8, v8, a1
; RV64MV-NEXT: lui a2, %hi(.LCPI4_1)
; RV64MV-NEXT: addi a2, a2, %lo(.LCPI4_1)
@@ -633,7 +633,7 @@
; RV64MV-NEXT: vmerge.vim v8, v10, -1, v0
; RV64MV-NEXT: vmv.x.s a1, v8
; RV64MV-NEXT: andi a1, a1, 2047
-; RV64MV-NEXT: vsetivli zero, 1, e16, mf2, ta, mu
+; RV64MV-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
; RV64MV-NEXT: vslidedown.vi v9, v8, 1
; RV64MV-NEXT: vmv.x.s a2, v9
; RV64MV-NEXT: andi a2, a2, 2047
diff --git a/llvm/test/CodeGen/RISCV/vadd-vp-mask.ll b/llvm/test/CodeGen/RISCV/vadd-vp-mask.ll
--- a/llvm/test/CodeGen/RISCV/vadd-vp-mask.ll
+++ b/llvm/test/CodeGen/RISCV/vadd-vp-mask.ll
@@ -10,7 +10,7 @@
define @vadd_vv_nxv2i1( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv2i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vmxor.mm v0, v0, v8
; CHECK-NEXT: ret
%v = call @llvm.vp.add.nxv2i1( %va, %b, %m, i32 %evl)
@@ -22,7 +22,7 @@
define @vadd_vv_nxv4i1( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv4i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmxor.mm v0, v0, v8
; CHECK-NEXT: ret
%v = call @llvm.vp.add.nxv4i1( %va, %b, %m, i32 %evl)
@@ -34,7 +34,7 @@
define @vadd_vv_nxv8i1( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv8i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vmxor.mm v0, v0, v8
; CHECK-NEXT: ret
%v = call @llvm.vp.add.nxv8i1( %va, %b, %m, i32 %evl)
@@ -46,7 +46,7 @@
define @vadd_vv_nxv16i1( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vmxor.mm v0, v0, v8
; CHECK-NEXT: ret
%v = call @llvm.vp.add.nxv16i1( %va, %b, %m, i32 %evl)
@@ -58,7 +58,7 @@
define @vadd_vv_nxv32i1( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv32i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vmxor.mm v0, v0, v8
; CHECK-NEXT: ret
%v = call @llvm.vp.add.nxv32i1( %va, %b, %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/vmul-vp-mask.ll b/llvm/test/CodeGen/RISCV/vmul-vp-mask.ll
--- a/llvm/test/CodeGen/RISCV/vmul-vp-mask.ll
+++ b/llvm/test/CodeGen/RISCV/vmul-vp-mask.ll
@@ -10,7 +10,7 @@
define @vmul_vv_nxv2i1( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv2i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vmand.mm v0, v0, v8
; CHECK-NEXT: ret
%v = call @llvm.vp.mul.nxv2i1( %va, %b, %m, i32 %evl)
@@ -22,7 +22,7 @@
define @vmul_vv_nxv4i1( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv4i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmand.mm v0, v0, v8
; CHECK-NEXT: ret
%v = call @llvm.vp.mul.nxv4i1( %va, %b, %m, i32 %evl)
@@ -34,7 +34,7 @@
define @vmul_vv_nxv8i1( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv8i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vmand.mm v0, v0, v8
; CHECK-NEXT: ret
%v = call @llvm.vp.mul.nxv8i1( %va, %b, %m, i32 %evl)
@@ -46,7 +46,7 @@
define @vmul_vv_nxv16i1( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vmand.mm v0, v0, v8
; CHECK-NEXT: ret
%v = call @llvm.vp.mul.nxv16i1( %va, %b, %m, i32 %evl)
@@ -58,7 +58,7 @@
define @vmul_vv_nxv32i1( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv32i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vmand.mm v0, v0, v8
; CHECK-NEXT: ret
%v = call @llvm.vp.mul.nxv32i1( %va, %b, %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/vsub-vp-mask.ll b/llvm/test/CodeGen/RISCV/vsub-vp-mask.ll
--- a/llvm/test/CodeGen/RISCV/vsub-vp-mask.ll
+++ b/llvm/test/CodeGen/RISCV/vsub-vp-mask.ll
@@ -10,7 +10,7 @@
define @vsub_vv_nxv2i1( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv2i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vmxor.mm v0, v0, v8
; CHECK-NEXT: ret
%v = call @llvm.vp.sub.nxv2i1( %va, %b, %m, i32 %evl)
@@ -22,7 +22,7 @@
define @vsub_vv_nxv4i1( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv4i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmxor.mm v0, v0, v8
; CHECK-NEXT: ret
%v = call @llvm.vp.sub.nxv4i1( %va, %b, %m, i32 %evl)
@@ -34,7 +34,7 @@
define @vsub_vv_nxv8i1( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv8i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vmxor.mm v0, v0, v8
; CHECK-NEXT: ret
%v = call @llvm.vp.sub.nxv8i1( %va, %b, %m, i32 %evl)
@@ -46,7 +46,7 @@
define @vsub_vv_nxv16i1( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vmxor.mm v0, v0, v8
; CHECK-NEXT: ret
%v = call @llvm.vp.sub.nxv16i1( %va, %b, %m, i32 %evl)
@@ -58,7 +58,7 @@
define @vsub_vv_nxv32i1( %va, %b, %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv32i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vmxor.mm v0, v0, v8
; CHECK-NEXT: ret
%v = call @llvm.vp.sub.nxv32i1( %va, %b, %m, i32 %evl)